1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5
6#include <linux/sched/prio.h>
7
8
9struct sched_param {
10 int sched_priority;
11};
12
13#include <asm/param.h>
14
15#include <linux/capability.h>
16#include <linux/threads.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/timex.h>
20#include <linux/jiffies.h>
21#include <linux/plist.h>
22#include <linux/rbtree.h>
23#include <linux/thread_info.h>
24#include <linux/cpumask.h>
25#include <linux/errno.h>
26#include <linux/nodemask.h>
27#include <linux/mm_types.h>
28#include <linux/preempt_mask.h>
29
30#include <asm/page.h>
31#include <asm/ptrace.h>
32#include <linux/cputime.h>
33
34#include <linux/smp.h>
35#include <linux/sem.h>
36#include <linux/shm.h>
37#include <linux/signal.h>
38#include <linux/compiler.h>
39#include <linux/completion.h>
40#include <linux/pid.h>
41#include <linux/percpu.h>
42#include <linux/topology.h>
43#include <linux/proportions.h>
44#include <linux/seccomp.h>
45#include <linux/rcupdate.h>
46#include <linux/rculist.h>
47#include <linux/rtmutex.h>
48
49#include <linux/time.h>
50#include <linux/param.h>
51#include <linux/resource.h>
52#include <linux/timer.h>
53#include <linux/hrtimer.h>
54#include <linux/task_io_accounting.h>
55#include <linux/latencytop.h>
56#include <linux/cred.h>
57#include <linux/llist.h>
58#include <linux/uidgid.h>
59#include <linux/gfp.h>
60#include <linux/magic.h>
61
62#include <asm/processor.h>
63
64#define SCHED_ATTR_SIZE_VER0 48
65
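/*
 * Extended scheduling parameters, as passed to sched_setattr() and
 * returned by sched_getattr().  @size lets the ABI grow (VER0 above is
 * the original 48-byte layout); @sched_nice and @sched_priority cover
 * the normal and POSIX real-time classes, while @sched_runtime,
 * @sched_deadline and @sched_period describe a SCHED_DEADLINE
 * reservation: @sched_runtime of CPU time every @sched_period, to be
 * consumed within @sched_deadline of each activation.
 */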
110struct sched_attr {
111 u32 size;
112
113 u32 sched_policy;
114 u64 sched_flags;
115
116
117 s32 sched_nice;
118
119
120 u32 sched_priority;
121
122
123 u64 sched_runtime;
124 u64 sched_deadline;
125 u64 sched_period;
126};
127
128struct exec_domain;
129struct futex_pi_state;
130struct robust_list_head;
131struct bio_list;
132struct fs_struct;
133struct perf_event_context;
134struct blk_plug;
135struct filename;
136
137#define VMACACHE_BITS 2
138#define VMACACHE_SIZE (1U << VMACACHE_BITS)
139#define VMACACHE_MASK (VMACACHE_SIZE - 1)
140
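/* Global load-average state, sampled every LOAD_FREQ ticks in fixed point
 * (FSHIFT fractional bits; EXP_* are the 1/5/15-minute decay factors). */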
151extern unsigned long avenrun[];
152extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
153
154#define FSHIFT 11
155#define FIXED_1 (1<<FSHIFT)
156#define LOAD_FREQ (5*HZ+1)
157#define EXP_1 1884
158#define EXP_5 2014
159#define EXP_15 2037
160
161#define CALC_LOAD(load,exp,n) \
162 load *= exp; \
163 load += n*(FIXED_1-exp); \
164 load >>= FSHIFT;
165
166extern unsigned long total_forks;
167extern int nr_threads;
168DECLARE_PER_CPU(unsigned long, process_counts);
169extern int nr_processes(void);
170extern unsigned long nr_running(void);
171extern bool single_task_running(void);
172extern unsigned long nr_iowait(void);
173extern unsigned long nr_iowait_cpu(int cpu);
174extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
175
176extern void calc_global_load(unsigned long ticks);
177extern void update_cpu_load_nohz(void);
178
179extern unsigned long get_parent_ip(unsigned long addr);
180
181extern void dump_cpu_task(int cpu);
182
183struct seq_file;
184struct cfs_rq;
185struct task_group;
186#ifdef CONFIG_SCHED_DEBUG
187extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
188extern void proc_sched_set_task(struct task_struct *p);
189extern void
190print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
191#endif
192
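/*
 * Task state bits.  The low values are runnable/sleep states kept in
 * task_struct::state; EXIT_ZOMBIE and EXIT_DEAD live in ::exit_state.
 * TASK_STATE_TO_CHAR_STR maps each state to the letter shown by ps/proc.
 */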
203#define TASK_RUNNING 0
204#define TASK_INTERRUPTIBLE 1
205#define TASK_UNINTERRUPTIBLE 2
206#define __TASK_STOPPED 4
207#define __TASK_TRACED 8
208
209#define EXIT_DEAD 16
210#define EXIT_ZOMBIE 32
211#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
212
213#define TASK_DEAD 64
214#define TASK_WAKEKILL 128
215#define TASK_WAKING 256
216#define TASK_PARKED 512
217#define TASK_STATE_MAX 1024
218
219#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
220
221extern char ___assert_task_state[1 - 2*!!(
222 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
223
224
225#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
226#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
227#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
228
229
230#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
231#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
232
233
234#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
235 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
236 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
237
238#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
239#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
240#define task_is_stopped_or_traced(task) \
241 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
242#define task_contributes_to_load(task) \
243 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
244 (task->flags & PF_FROZEN) == 0)
245
246#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
247
248#define __set_task_state(tsk, state_value) \
249 do { \
250 (tsk)->task_state_change = _THIS_IP_; \
251 (tsk)->state = (state_value); \
252 } while (0)
253#define set_task_state(tsk, state_value) \
254 do { \
255 (tsk)->task_state_change = _THIS_IP_; \
256 set_mb((tsk)->state, (state_value)); \
257 } while (0)
258
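/*
 * set_current_state() uses set_mb() so the state write is ordered against
 * the caller's following test of its wakeup condition; __set_current_state()
 * is the unserialised variant.  Recording _THIS_IP_ in task_state_change
 * lets CONFIG_DEBUG_ATOMIC_SLEEP report who last changed the state.
 */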
270#define __set_current_state(state_value) \
271 do { \
272 current->task_state_change = _THIS_IP_; \
273 current->state = (state_value); \
274 } while (0)
275#define set_current_state(state_value) \
276 do { \
277 current->task_state_change = _THIS_IP_; \
278 set_mb(current->state, (state_value)); \
279 } while (0)
280
281#else
282
283#define __set_task_state(tsk, state_value) \
284 do { (tsk)->state = (state_value); } while (0)
285#define set_task_state(tsk, state_value) \
286 set_mb((tsk)->state, (state_value))
287
299#define __set_current_state(state_value) \
300 do { current->state = (state_value); } while (0)
301#define set_current_state(state_value) \
302 set_mb(current->state, (state_value))
303
304#endif
305
306
307#define TASK_COMM_LEN 16
308
309#include <linux/spinlock.h>
310
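/* tasklist_lock protects the task list and parent/child/thread links;
 * mmlist_lock serialises additions to and removals from init_mm.mmlist. */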
317extern rwlock_t tasklist_lock;
318extern spinlock_t mmlist_lock;
319
320struct task_struct;
321
322#ifdef CONFIG_PROVE_RCU
323extern int lockdep_tasklist_lock_is_held(void);
324#endif
325
326extern void sched_init(void);
327extern void sched_init_smp(void);
328extern asmlinkage void schedule_tail(struct task_struct *prev);
329extern void init_idle(struct task_struct *idle, int cpu);
330extern void init_idle_bootup_task(struct task_struct *idle);
331
332extern int runqueue_is_locked(int cpu);
333
334#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
335extern void nohz_balance_enter_idle(int cpu);
336extern void set_cpu_sd_state_idle(void);
337extern int get_nohz_timer_target(int pinned);
338#else
339static inline void nohz_balance_enter_idle(int cpu) { }
340static inline void set_cpu_sd_state_idle(void) { }
341static inline int get_nohz_timer_target(int pinned)
342{
343 return smp_processor_id();
344}
345#endif
346
347
348
349
350extern void show_state_filter(unsigned long state_filter);
351
352static inline void show_state(void)
353{
354 show_state_filter(0);
355}
356
357extern void show_regs(struct pt_regs *);
358
359
360
361
362
363
364extern void show_stack(struct task_struct *task, unsigned long *sp);
365
366void io_schedule(void);
367long io_schedule_timeout(long timeout);
368
369extern void cpu_init (void);
370extern void trap_init(void);
371extern void update_process_times(int user);
372extern void scheduler_tick(void);
373
374extern void sched_show_task(struct task_struct *p);
375
376#ifdef CONFIG_LOCKUP_DETECTOR
377extern void touch_softlockup_watchdog(void);
378extern void touch_softlockup_watchdog_sync(void);
379extern void touch_all_softlockup_watchdogs(void);
380extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
381 void __user *buffer,
382 size_t *lenp, loff_t *ppos);
383extern unsigned int softlockup_panic;
384void lockup_detector_init(void);
385#else
386static inline void touch_softlockup_watchdog(void)
387{
388}
389static inline void touch_softlockup_watchdog_sync(void)
390{
391}
392static inline void touch_all_softlockup_watchdogs(void)
393{
394}
395static inline void lockup_detector_init(void)
396{
397}
398#endif
399
400#ifdef CONFIG_DETECT_HUNG_TASK
401void reset_hung_task_detector(void);
402#else
403static inline void reset_hung_task_detector(void)
404{
405}
406#endif
407
408
409#define __sched __attribute__((__section__(".sched.text")))
410
411
412extern char __sched_text_start[], __sched_text_end[];
413
414
415extern int in_sched_functions(unsigned long addr);
416
417#define MAX_SCHEDULE_TIMEOUT LONG_MAX
418extern signed long schedule_timeout(signed long timeout);
419extern signed long schedule_timeout_interruptible(signed long timeout);
420extern signed long schedule_timeout_killable(signed long timeout);
421extern signed long schedule_timeout_uninterruptible(signed long timeout);
422asmlinkage void schedule(void);
423extern void schedule_preempt_disabled(void);
424
425struct nsproxy;
426struct user_namespace;
427
428#ifdef CONFIG_MMU
429extern void arch_pick_mmap_layout(struct mm_struct *mm);
430extern unsigned long
431arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
432 unsigned long, unsigned long);
433extern unsigned long
434arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
435 unsigned long len, unsigned long pgoff,
436 unsigned long flags);
437#else
438static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
439#endif
440
441#define SUID_DUMP_DISABLE 0
442#define SUID_DUMP_USER 1
443#define SUID_DUMP_ROOT 2
444
445
446
447
448#define MMF_DUMPABLE_BITS 2
449#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
450
451extern void set_dumpable(struct mm_struct *mm, int value);
452
453
454
455
456
457
458static inline int __get_dumpable(unsigned long mm_flags)
459{
460 return mm_flags & MMF_DUMPABLE_MASK;
461}
462
463static inline int get_dumpable(struct mm_struct *mm)
464{
465 return __get_dumpable(mm->flags);
466}
467
468
469#define MMF_DUMP_ANON_PRIVATE 2
470#define MMF_DUMP_ANON_SHARED 3
471#define MMF_DUMP_MAPPED_PRIVATE 4
472#define MMF_DUMP_MAPPED_SHARED 5
473#define MMF_DUMP_ELF_HEADERS 6
474#define MMF_DUMP_HUGETLB_PRIVATE 7
475#define MMF_DUMP_HUGETLB_SHARED 8
476
477#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
478#define MMF_DUMP_FILTER_BITS 7
479#define MMF_DUMP_FILTER_MASK \
480 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
481#define MMF_DUMP_FILTER_DEFAULT \
482 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
483 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
484
485#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
486# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
487#else
488# define MMF_DUMP_MASK_DEFAULT_ELF 0
489#endif
490
491#define MMF_VM_MERGEABLE 16
492#define MMF_VM_HUGEPAGE 17
493#define MMF_EXE_FILE_CHANGED 18
494
495#define MMF_HAS_UPROBES 19
496#define MMF_RECALC_UPROBES 20
497
498#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
499
500struct sighand_struct {
501 atomic_t count;
502 struct k_sigaction action[_NSIG];
503 spinlock_t siglock;
504 wait_queue_head_t signalfd_wqh;
505};
506
507struct pacct_struct {
508 int ac_flag;
509 long ac_exitcode;
510 unsigned long ac_mem;
511 cputime_t ac_utime, ac_stime;
512 unsigned long ac_minflt, ac_majflt;
513};
514
515struct cpu_itimer {
516 cputime_t expires;
517 cputime_t incr;
518 u32 error;
519 u32 incr_error;
520};
521
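/* A user/system time pair; used to snapshot previously reported cputime. */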
529struct cputime {
530 cputime_t utime;
531 cputime_t stime;
532};
533
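/*
 * Collected CPU time (user, system and total scheduled runtime) for a
 * task or thread group; this is the quantity the POSIX CPU timer code
 * compares against cputime_expires.
 */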
548struct task_cputime {
549 cputime_t utime;
550 cputime_t stime;
551 unsigned long long sum_exec_runtime;
552};
553
554#define prof_exp stime
555#define virt_exp utime
556#define sched_exp sum_exec_runtime
557
558#define INIT_CPUTIME \
559 (struct task_cputime) { \
560 .utime = 0, \
561 .stime = 0, \
562 .sum_exec_runtime = 0, \
563 }
564
565#ifdef CONFIG_PREEMPT_COUNT
566#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
567#else
568#define PREEMPT_DISABLED PREEMPT_ENABLED
569#endif
570
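/*
 * Initial preempt_count: preemption disabled (plus PREEMPT_ACTIVE, so that
 * cond_resched() is a no-op) until the scheduler is fully running.
 */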
578#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE)
579
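/*
 * Thread-group copy of task_cputime used for process-wide CPU timers;
 * @running says whether any such timer is armed, @lock protects updates
 * from each thread's accounting tick.
 */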
590struct thread_group_cputimer {
591 struct task_cputime cputime;
592 int running;
593 raw_spinlock_t lock;
594};
595
596#include <linux/rwsem.h>
597struct autogroup;
598
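/*
 * NOTE: signal_struct has no locking of its own; sharing a signal_struct
 * always implies sharing a sighand_struct, so sighand->siglock covers it.
 */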
606struct signal_struct {
607 atomic_t sigcnt;
608 atomic_t live;
609 int nr_threads;
610 struct list_head thread_head;
611
612 wait_queue_head_t wait_chldexit;
613
614
615 struct task_struct *curr_target;
616
617
618 struct sigpending shared_pending;
619
620
621 int group_exit_code;
622
623
624
625
626
627 int notify_count;
628 struct task_struct *group_exit_task;
629
630
631 int group_stop_count;
632 unsigned int flags;
633
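 /* PR_SET_CHILD_SUBREAPER: orphaned descendants are re-parented to the
  * nearest live subreaper instead of init. */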
643 unsigned int is_child_subreaper:1;
644 unsigned int has_child_subreaper:1;
645
646
647 int posix_timer_id;
648 struct list_head posix_timers;
649
650
651 struct hrtimer real_timer;
652 struct pid *leader_pid;
653 ktime_t it_real_incr;
654
655
656
657
658
659
660 struct cpu_itimer it[2];
661
662
663
664
665
666 struct thread_group_cputimer cputimer;
667
668
669 struct task_cputime cputime_expires;
670
671 struct list_head cpu_timers[3];
672
673 struct pid *tty_old_pgrp;
674
675
676 int leader;
677
678 struct tty_struct *tty;
679
680#ifdef CONFIG_SCHED_AUTOGROUP
681 struct autogroup *autogroup;
682#endif
683
684
685
686
687
688
689 seqlock_t stats_lock;
690 cputime_t utime, stime, cutime, cstime;
691 cputime_t gtime;
692 cputime_t cgtime;
693#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
694 struct cputime prev_cputime;
695#endif
696 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
697 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
698 unsigned long inblock, oublock, cinblock, coublock;
699 unsigned long maxrss, cmaxrss;
700 struct task_io_accounting ioac;
701
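 /* Cumulative scheduled CPU time, in ns, of exited threads in the group. */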
708 unsigned long long sum_sched_runtime;
709
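 /* Resource limits shared by the whole thread group; most readers are
  * deliberately left unsynchronised. */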
719 struct rlimit rlim[RLIM_NLIMITS];
720
721#ifdef CONFIG_BSD_PROCESS_ACCT
722 struct pacct_struct pacct;
723#endif
724#ifdef CONFIG_TASKSTATS
725 struct taskstats *stats;
726#endif
727#ifdef CONFIG_AUDIT
728 unsigned audit_tty;
729 unsigned audit_tty_log_passwd;
730 struct tty_audit_buf *tty_audit_buf;
731#endif
732#ifdef CONFIG_CGROUPS
733
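 /* Taken for read around fork/exit so cgroup migration can write-lock the
  * whole thread group (threadgroup_change_begin/end). */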
742 struct rw_semaphore group_rwsem;
743#endif
744
745 oom_flags_t oom_flags;
746 short oom_score_adj;
747 short oom_score_adj_min;
748
749
750 struct mutex cred_guard_mutex;
751
752
753};
754
755
756
757
758#define SIGNAL_STOP_STOPPED 0x00000001
759#define SIGNAL_STOP_CONTINUED 0x00000002
760#define SIGNAL_GROUP_EXIT 0x00000004
761#define SIGNAL_GROUP_COREDUMP 0x00000008
762
763
764
765#define SIGNAL_CLD_STOPPED 0x00000010
766#define SIGNAL_CLD_CONTINUED 0x00000020
767#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
768
769#define SIGNAL_UNKILLABLE 0x00000040
770
771
772static inline int signal_group_exit(const struct signal_struct *sig)
773{
774 return (sig->flags & SIGNAL_GROUP_EXIT) ||
775 (sig->group_exit_task != NULL);
776}
777
778
779
780
781struct user_struct {
782 atomic_t __count;
783 atomic_t processes;
784 atomic_t sigpending;
785#ifdef CONFIG_INOTIFY_USER
786 atomic_t inotify_watches;
787 atomic_t inotify_devs;
788#endif
789#ifdef CONFIG_FANOTIFY
790 atomic_t fanotify_listeners;
791#endif
792#ifdef CONFIG_EPOLL
793 atomic_long_t epoll_watches;
794#endif
795#ifdef CONFIG_POSIX_MQUEUE
796
797 unsigned long mq_bytes;
798#endif
799 unsigned long locked_shm;
800
801#ifdef CONFIG_KEYS
802 struct key *uid_keyring;
803 struct key *session_keyring;
804#endif
805
806
807 struct hlist_node uidhash_node;
808 kuid_t uid;
809
810#ifdef CONFIG_PERF_EVENTS
811 atomic_long_t locked_vm;
812#endif
813};
814
815extern int uids_sysfs_init(void);
816
817extern struct user_struct *find_user(kuid_t);
818
819extern struct user_struct root_user;
820#define INIT_USER (&root_user)
821
822
823struct backing_dev_info;
824struct reclaim_state;
825
826#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
827struct sched_info {
828
829 unsigned long pcount;
830 unsigned long long run_delay;
831
832
833 unsigned long long last_arrival,
834 last_queued;
835};
836#endif
837
838#ifdef CONFIG_TASK_DELAY_ACCT
839struct task_delay_info {
840 spinlock_t lock;
841 unsigned int flags;
842
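 /* For each delay class below: a start timestamp, the accumulated delay in
  * nanoseconds, and a count of completed delays (all protected by @lock). */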
858 u64 blkio_start;
859 u64 blkio_delay;
860 u64 swapin_delay;
861 u32 blkio_count;
862
863 u32 swapin_count;
864
865
866 u64 freepages_start;
867 u64 freepages_delay;
868 u32 freepages_count;
869};
870#endif
871
872static inline int sched_info_on(void)
873{
874#ifdef CONFIG_SCHEDSTATS
875 return 1;
876#elif defined(CONFIG_TASK_DELAY_ACCT)
877 extern int delayacct_on;
878 return delayacct_on;
879#else
880 return 0;
881#endif
882}
883
884enum cpu_idle_type {
885 CPU_IDLE,
886 CPU_NOT_IDLE,
887 CPU_NEWLY_IDLE,
888 CPU_MAX_IDLE_TYPES
889};
890
891
892
893
894#define SCHED_CAPACITY_SHIFT 10
895#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
896
897
898
899
900#ifdef CONFIG_SMP
901#define SD_LOAD_BALANCE 0x0001
902#define SD_BALANCE_NEWIDLE 0x0002
903#define SD_BALANCE_EXEC 0x0004
904#define SD_BALANCE_FORK 0x0008
905#define SD_BALANCE_WAKE 0x0010
906#define SD_WAKE_AFFINE 0x0020
907#define SD_SHARE_CPUCAPACITY 0x0080
908#define SD_SHARE_POWERDOMAIN 0x0100
909#define SD_SHARE_PKG_RESOURCES 0x0200
910#define SD_SERIALIZE 0x0400
911#define SD_ASYM_PACKING 0x0800
912#define SD_PREFER_SIBLING 0x1000
913#define SD_OVERLAP 0x2000
914#define SD_NUMA 0x4000
915
916#ifdef CONFIG_SCHED_SMT
917static inline int cpu_smt_flags(void)
918{
919 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
920}
921#endif
922
923#ifdef CONFIG_SCHED_MC
924static inline int cpu_core_flags(void)
925{
926 return SD_SHARE_PKG_RESOURCES;
927}
928#endif
929
930#ifdef CONFIG_NUMA
931static inline int cpu_numa_flags(void)
932{
933 return SD_NUMA;
934}
935#endif
936
937struct sched_domain_attr {
938 int relax_domain_level;
939};
940
941#define SD_ATTR_INIT (struct sched_domain_attr) { \
942 .relax_domain_level = -1, \
943}
944
945extern int sched_domain_level_max;
946
947struct sched_group;
948
949struct sched_domain {
950
951 struct sched_domain *parent;
952 struct sched_domain *child;
953 struct sched_group *groups;
954 unsigned long min_interval;
955 unsigned long max_interval;
956 unsigned int busy_factor;
957 unsigned int imbalance_pct;
958 unsigned int cache_nice_tries;
959 unsigned int busy_idx;
960 unsigned int idle_idx;
961 unsigned int newidle_idx;
962 unsigned int wake_idx;
963 unsigned int forkexec_idx;
964 unsigned int smt_gain;
965
966 int nohz_idle;
967 int flags;
968 int level;
969
970
971 unsigned long last_balance;
972 unsigned int balance_interval;
973 unsigned int nr_balance_failed;
974
975
976 u64 max_newidle_lb_cost;
977 unsigned long next_decay_max_lb_cost;
978
979#ifdef CONFIG_SCHEDSTATS
980
981 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
982 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
983 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
984 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
985 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
986 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
987 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
988 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
989
990
991 unsigned int alb_count;
992 unsigned int alb_failed;
993 unsigned int alb_pushed;
994
995
996 unsigned int sbe_count;
997 unsigned int sbe_balanced;
998 unsigned int sbe_pushed;
999
1000
1001 unsigned int sbf_count;
1002 unsigned int sbf_balanced;
1003 unsigned int sbf_pushed;
1004
1005
1006 unsigned int ttwu_wake_remote;
1007 unsigned int ttwu_move_affine;
1008 unsigned int ttwu_move_balance;
1009#endif
1010#ifdef CONFIG_SCHED_DEBUG
1011 char *name;
1012#endif
1013 union {
1014 void *private;
1015 struct rcu_head rcu;
1016 };
1017
1018 unsigned int span_weight;
1019
1020
1021
1022
1023
1024
1025
1026 unsigned long span[0];
1027};
1028
1029static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1030{
1031 return to_cpumask(sd->span);
1032}
1033
1034extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1035 struct sched_domain_attr *dattr_new);
1036
1037
1038cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1039void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1040
1041bool cpus_share_cache(int this_cpu, int that_cpu);
1042
1043typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1044typedef int (*sched_domain_flags_f)(void);
1045
1046#define SDTL_OVERLAP 0x01
1047
1048struct sd_data {
1049 struct sched_domain **__percpu sd;
1050 struct sched_group **__percpu sg;
1051 struct sched_group_capacity **__percpu sgc;
1052};
1053
1054struct sched_domain_topology_level {
1055 sched_domain_mask_f mask;
1056 sched_domain_flags_f sd_flags;
1057 int flags;
1058 int numa_level;
1059 struct sd_data data;
1060#ifdef CONFIG_SCHED_DEBUG
1061 char *name;
1062#endif
1063};
1064
1065extern struct sched_domain_topology_level *sched_domain_topology;
1066
1067extern void set_sched_topology(struct sched_domain_topology_level *tl);
1068extern void wake_up_if_idle(int cpu);
1069
1070#ifdef CONFIG_SCHED_DEBUG
1071# define SD_INIT_NAME(type) .name = #type
1072#else
1073# define SD_INIT_NAME(type)
1074#endif
1075
1076#else
1077
1078struct sched_domain_attr;
1079
1080static inline void
1081partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1082 struct sched_domain_attr *dattr_new)
1083{
1084}
1085
1086static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1087{
1088 return true;
1089}
1090
1091#endif
1092
1093
1094struct io_context;
1095
1096
1097#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1098extern void prefetch_stack(struct task_struct *t);
1099#else
1100static inline void prefetch_stack(struct task_struct *t) { }
1101#endif
1102
1103struct audit_context;
1104struct mempolicy;
1105struct pipe_inode_info;
1106struct uts_namespace;
1107
1108struct load_weight {
1109 unsigned long weight;
1110 u32 inv_weight;
1111};
1112
1113struct sched_avg {
1114
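 /* Geometrically decayed sums of runnable time; the series is bounded, so
  * a u32 is wide enough. */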
1119 u32 runnable_avg_sum, runnable_avg_period;
1120 u64 last_runnable_update;
1121 s64 decay_count;
1122 unsigned long load_avg_contrib;
1123};
1124
1125#ifdef CONFIG_SCHEDSTATS
1126struct sched_statistics {
1127 u64 wait_start;
1128 u64 wait_max;
1129 u64 wait_count;
1130 u64 wait_sum;
1131 u64 iowait_count;
1132 u64 iowait_sum;
1133
1134 u64 sleep_start;
1135 u64 sleep_max;
1136 s64 sum_sleep_runtime;
1137
1138 u64 block_start;
1139 u64 block_max;
1140 u64 exec_max;
1141 u64 slice_max;
1142
1143 u64 nr_migrations_cold;
1144 u64 nr_failed_migrations_affine;
1145 u64 nr_failed_migrations_running;
1146 u64 nr_failed_migrations_hot;
1147 u64 nr_forced_migrations;
1148
1149 u64 nr_wakeups;
1150 u64 nr_wakeups_sync;
1151 u64 nr_wakeups_migrate;
1152 u64 nr_wakeups_local;
1153 u64 nr_wakeups_remote;
1154 u64 nr_wakeups_affine;
1155 u64 nr_wakeups_affine_attempts;
1156 u64 nr_wakeups_passive;
1157 u64 nr_wakeups_idle;
1158};
1159#endif
1160
1161struct sched_entity {
1162 struct load_weight load;
1163 struct rb_node run_node;
1164 struct list_head group_node;
1165 unsigned int on_rq;
1166
1167 u64 exec_start;
1168 u64 sum_exec_runtime;
1169 u64 vruntime;
1170 u64 prev_sum_exec_runtime;
1171
1172 u64 nr_migrations;
1173
1174#ifdef CONFIG_SCHEDSTATS
1175 struct sched_statistics statistics;
1176#endif
1177
1178#ifdef CONFIG_FAIR_GROUP_SCHED
1179 int depth;
1180 struct sched_entity *parent;
1181
1182 struct cfs_rq *cfs_rq;
1183
1184 struct cfs_rq *my_q;
1185#endif
1186
1187#ifdef CONFIG_SMP
1188
1189 struct sched_avg avg;
1190#endif
1191};
1192
1193struct sched_rt_entity {
1194 struct list_head run_list;
1195 unsigned long timeout;
1196 unsigned long watchdog_stamp;
1197 unsigned int time_slice;
1198
1199 struct sched_rt_entity *back;
1200#ifdef CONFIG_RT_GROUP_SCHED
1201 struct sched_rt_entity *parent;
1202
1203 struct rt_rq *rt_rq;
1204
1205 struct rt_rq *my_q;
1206#endif
1207};
1208
1209struct sched_dl_entity {
1210 struct rb_node rb_node;
1211
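 /* Original parameters, copied from sched_attr at sched_setattr() time. */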
1217 u64 dl_runtime;
1218 u64 dl_deadline;
1219 u64 dl_period;
1220 u64 dl_bw;
1221
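 /* Current instance's parameters, updated as the task runs and replenished
  * by the CBS (constant bandwidth server) rules. */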
1227 s64 runtime;
1228 u64 deadline;
1229 unsigned int flags;
1230
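 /* dl_throttled: runtime exhausted for the current instance;
  * dl_new: a new instance has arrived but not yet been acted on;
  * dl_boosted: deadline-inheritance boost through a rt_mutex;
  * dl_yielded: the task gave the CPU up before exhausting its runtime. */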
1249 int dl_throttled, dl_new, dl_boosted, dl_yielded;
1250
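 /* Timer that replenishes the runtime budget at the start of each period. */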
1255 struct hrtimer dl_timer;
1256};
1257
1258union rcu_special {
1259 struct {
1260 bool blocked;
1261 bool need_qs;
1262 } b;
1263 short s;
1264};
1265struct rcu_node;
1266
1267enum perf_event_task_context {
1268 perf_invalid_context = -1,
1269 perf_hw_context = 0,
1270 perf_sw_context,
1271 perf_nr_task_contexts,
1272};
1273
1274struct task_struct {
1275 volatile long state;
1276 void *stack;
1277 atomic_t usage;
1278 unsigned int flags;
1279 unsigned int ptrace;
1280
1281#ifdef CONFIG_SMP
1282 struct llist_node wake_entry;
1283 int on_cpu;
1284 struct task_struct *last_wakee;
1285 unsigned long wakee_flips;
1286 unsigned long wakee_flip_decay_ts;
1287
1288 int wake_cpu;
1289#endif
1290 int on_rq;
1291
1292 int prio, static_prio, normal_prio;
1293 unsigned int rt_priority;
1294 const struct sched_class *sched_class;
1295 struct sched_entity se;
1296 struct sched_rt_entity rt;
1297#ifdef CONFIG_CGROUP_SCHED
1298 struct task_group *sched_task_group;
1299#endif
1300 struct sched_dl_entity dl;
1301
1302#ifdef CONFIG_PREEMPT_NOTIFIERS
1303
1304 struct hlist_head preempt_notifiers;
1305#endif
1306
1307#ifdef CONFIG_BLK_DEV_IO_TRACE
1308 unsigned int btrace_seq;
1309#endif
1310
1311 unsigned int policy;
1312 int nr_cpus_allowed;
1313 cpumask_t cpus_allowed;
1314
1315#ifdef CONFIG_PREEMPT_RCU
1316 int rcu_read_lock_nesting;
1317 union rcu_special rcu_read_unlock_special;
1318 struct list_head rcu_node_entry;
1319#endif
1320#ifdef CONFIG_PREEMPT_RCU
1321 struct rcu_node *rcu_blocked_node;
1322#endif
1323#ifdef CONFIG_TASKS_RCU
1324 unsigned long rcu_tasks_nvcsw;
1325 bool rcu_tasks_holdout;
1326 struct list_head rcu_tasks_holdout_list;
1327 int rcu_tasks_idle_cpu;
1328#endif
1329
1330#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1331 struct sched_info sched_info;
1332#endif
1333
1334 struct list_head tasks;
1335#ifdef CONFIG_SMP
1336 struct plist_node pushable_tasks;
1337 struct rb_node pushable_dl_tasks;
1338#endif
1339
1340 struct mm_struct *mm, *active_mm;
1341#ifdef CONFIG_COMPAT_BRK
1342 unsigned brk_randomized:1;
1343#endif
1344
1345 u32 vmacache_seqnum;
1346 struct vm_area_struct *vmacache[VMACACHE_SIZE];
1347#if defined(SPLIT_RSS_COUNTING)
1348 struct task_rss_stat rss_stat;
1349#endif
1350
1351 int exit_state;
1352 int exit_code, exit_signal;
1353 int pdeath_signal;
1354 unsigned int jobctl;
1355
1356
1357 unsigned int personality;
1358
1359 unsigned in_execve:1;
1360
1361 unsigned in_iowait:1;
1362
1363
1364 unsigned sched_reset_on_fork:1;
1365 unsigned sched_contributes_to_load:1;
1366
1367#ifdef CONFIG_MEMCG_KMEM
1368 unsigned memcg_kmem_skip_account:1;
1369#endif
1370
1371 unsigned long atomic_flags;
1372
1373 pid_t pid;
1374 pid_t tgid;
1375
1376#ifdef CONFIG_CC_STACKPROTECTOR
1377
1378 unsigned long stack_canary;
1379#endif
1380
1381
1382
1383
1384
1385 struct task_struct __rcu *real_parent;
1386 struct task_struct __rcu *parent;
1387
1388
1389
1390 struct list_head children;
1391 struct list_head sibling;
1392 struct task_struct *group_leader;
1393
1394
1395
1396
1397
1398
1399 struct list_head ptraced;
1400 struct list_head ptrace_entry;
1401
1402
1403 struct pid_link pids[PIDTYPE_MAX];
1404 struct list_head thread_group;
1405 struct list_head thread_node;
1406
1407 struct completion *vfork_done;
1408 int __user *set_child_tid;
1409 int __user *clear_child_tid;
1410
1411 cputime_t utime, stime, utimescaled, stimescaled;
1412 cputime_t gtime;
1413#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1414 struct cputime prev_cputime;
1415#endif
1416#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1417 seqlock_t vtime_seqlock;
1418 unsigned long long vtime_snap;
1419 enum {
1420 VTIME_SLEEPING = 0,
1421 VTIME_USER,
1422 VTIME_SYS,
1423 } vtime_snap_whence;
1424#endif
1425 unsigned long nvcsw, nivcsw;
1426 u64 start_time;
1427 u64 real_start_time;
1428
1429 unsigned long min_flt, maj_flt;
1430
1431 struct task_cputime cputime_expires;
1432 struct list_head cpu_timers[3];
1433
1434
1435 const struct cred __rcu *real_cred;
1436
1437 const struct cred __rcu *cred;
1438
1439 char comm[TASK_COMM_LEN];
1440
1441
1442
1443
1444 int link_count, total_link_count;
1445#ifdef CONFIG_SYSVIPC
1446
1447 struct sysv_sem sysvsem;
1448 struct sysv_shm sysvshm;
1449#endif
1450#ifdef CONFIG_DETECT_HUNG_TASK
1451
1452 unsigned long last_switch_count;
1453#endif
1454
1455 struct thread_struct thread;
1456
1457 struct fs_struct *fs;
1458
1459 struct files_struct *files;
1460
1461 struct nsproxy *nsproxy;
1462
1463 struct signal_struct *signal;
1464 struct sighand_struct *sighand;
1465
1466 sigset_t blocked, real_blocked;
1467 sigset_t saved_sigmask;
1468 struct sigpending pending;
1469
1470 unsigned long sas_ss_sp;
1471 size_t sas_ss_size;
1472 int (*notifier)(void *priv);
1473 void *notifier_data;
1474 sigset_t *notifier_mask;
1475 struct callback_head *task_works;
1476
1477 struct audit_context *audit_context;
1478#ifdef CONFIG_AUDITSYSCALL
1479 kuid_t loginuid;
1480 unsigned int sessionid;
1481#endif
1482 struct seccomp seccomp;
1483
1484
1485 u32 parent_exec_id;
1486 u32 self_exec_id;
1487
1488
1489 spinlock_t alloc_lock;
1490
1491
1492 raw_spinlock_t pi_lock;
1493
1494#ifdef CONFIG_RT_MUTEXES
1495
1496 struct rb_root pi_waiters;
1497 struct rb_node *pi_waiters_leftmost;
1498
1499 struct rt_mutex_waiter *pi_blocked_on;
1500#endif
1501
1502#ifdef CONFIG_DEBUG_MUTEXES
1503
1504 struct mutex_waiter *blocked_on;
1505#endif
1506#ifdef CONFIG_TRACE_IRQFLAGS
1507 unsigned int irq_events;
1508 unsigned long hardirq_enable_ip;
1509 unsigned long hardirq_disable_ip;
1510 unsigned int hardirq_enable_event;
1511 unsigned int hardirq_disable_event;
1512 int hardirqs_enabled;
1513 int hardirq_context;
1514 unsigned long softirq_disable_ip;
1515 unsigned long softirq_enable_ip;
1516 unsigned int softirq_disable_event;
1517 unsigned int softirq_enable_event;
1518 int softirqs_enabled;
1519 int softirq_context;
1520#endif
1521#ifdef CONFIG_LOCKDEP
1522# define MAX_LOCK_DEPTH 48UL
1523 u64 curr_chain_key;
1524 int lockdep_depth;
1525 unsigned int lockdep_recursion;
1526 struct held_lock held_locks[MAX_LOCK_DEPTH];
1527 gfp_t lockdep_reclaim_gfp;
1528#endif
1529
1530
1531 void *journal_info;
1532
1533
1534 struct bio_list *bio_list;
1535
1536#ifdef CONFIG_BLOCK
1537
1538 struct blk_plug *plug;
1539#endif
1540
1541
1542 struct reclaim_state *reclaim_state;
1543
1544 struct backing_dev_info *backing_dev_info;
1545
1546 struct io_context *io_context;
1547
1548 unsigned long ptrace_message;
1549 siginfo_t *last_siginfo;
1550 struct task_io_accounting ioac;
1551#if defined(CONFIG_TASK_XACCT)
1552 u64 acct_rss_mem1;
1553 u64 acct_vm_mem1;
1554 cputime_t acct_timexpd;
1555#endif
1556#ifdef CONFIG_CPUSETS
1557 nodemask_t mems_allowed;
1558 seqcount_t mems_allowed_seq;
1559 int cpuset_mem_spread_rotor;
1560 int cpuset_slab_spread_rotor;
1561#endif
1562#ifdef CONFIG_CGROUPS
1563
1564 struct css_set __rcu *cgroups;
1565
1566 struct list_head cg_list;
1567#endif
1568#ifdef CONFIG_FUTEX
1569 struct robust_list_head __user *robust_list;
1570#ifdef CONFIG_COMPAT
1571 struct compat_robust_list_head __user *compat_robust_list;
1572#endif
1573 struct list_head pi_state_list;
1574 struct futex_pi_state *pi_state_cache;
1575#endif
1576#ifdef CONFIG_PERF_EVENTS
1577 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1578 struct mutex perf_event_mutex;
1579 struct list_head perf_event_list;
1580#endif
1581#ifdef CONFIG_DEBUG_PREEMPT
1582 unsigned long preempt_disable_ip;
1583#endif
1584#ifdef CONFIG_NUMA
1585 struct mempolicy *mempolicy;
1586 short il_next;
1587 short pref_node_fork;
1588#endif
1589#ifdef CONFIG_NUMA_BALANCING
1590 int numa_scan_seq;
1591 unsigned int numa_scan_period;
1592 unsigned int numa_scan_period_max;
1593 int numa_preferred_nid;
1594 unsigned long numa_migrate_retry;
1595 u64 node_stamp;
1596 u64 last_task_numa_placement;
1597 u64 last_sum_exec_runtime;
1598 struct callback_head numa_work;
1599
1600 struct list_head numa_entry;
1601 struct numa_group *numa_group;
1602
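 /* Per-node counters of NUMA hinting page faults; they decay over time and
  * drive the choice of numa_preferred_nid. */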
1617 unsigned long *numa_faults;
1618 unsigned long total_numa_faults;
1619
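 /* Local vs. remote faults seen during the last scan window, used to tune
  * numa_scan_period. */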
1626 unsigned long numa_faults_locality[2];
1627
1628 unsigned long numa_pages_migrated;
1629#endif
1630
1631 struct rcu_head rcu;
1632
1633
1634
1635
1636 struct pipe_inode_info *splice_pipe;
1637
1638 struct page_frag task_frag;
1639
1640#ifdef CONFIG_TASK_DELAY_ACCT
1641 struct task_delay_info *delays;
1642#endif
1643#ifdef CONFIG_FAULT_INJECTION
1644 int make_it_fail;
1645#endif
1646
1647
1648
1649
1650 int nr_dirtied;
1651 int nr_dirtied_pause;
1652 unsigned long dirty_paused_when;
1653
1654#ifdef CONFIG_LATENCYTOP
1655 int latency_record_count;
1656 struct latency_record latency_record[LT_SAVECOUNT];
1657#endif
1658
1659
1660
1661
1662 unsigned long timer_slack_ns;
1663 unsigned long default_timer_slack_ns;
1664
1665#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1666
1667 int curr_ret_stack;
1668
1669 struct ftrace_ret_stack *ret_stack;
1670
1671 unsigned long long ftrace_timestamp;
1672
1673
1674
1675
1676 atomic_t trace_overrun;
1677
1678 atomic_t tracing_graph_pause;
1679#endif
1680#ifdef CONFIG_TRACING
1681
1682 unsigned long trace;
1683
1684 unsigned long trace_recursion;
1685#endif
1686#ifdef CONFIG_MEMCG
1687 struct memcg_oom_info {
1688 struct mem_cgroup *memcg;
1689 gfp_t gfp_mask;
1690 int order;
1691 unsigned int may_oom:1;
1692 } memcg_oom;
1693#endif
1694#ifdef CONFIG_UPROBES
1695 struct uprobe_task *utask;
1696#endif
1697#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1698 unsigned int sequential_io;
1699 unsigned int sequential_io_avg;
1700#endif
1701#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1702 unsigned long task_state_change;
1703#endif
1704};
1705
1706
1707#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1708
1709#define TNF_MIGRATED 0x01
1710#define TNF_NO_GROUP 0x02
1711#define TNF_SHARED 0x04
1712#define TNF_FAULT_LOCAL 0x08
1713
1714#ifdef CONFIG_NUMA_BALANCING
1715extern void task_numa_fault(int last_node, int node, int pages, int flags);
1716extern pid_t task_numa_group_id(struct task_struct *p);
1717extern void set_numabalancing_state(bool enabled);
1718extern void task_numa_free(struct task_struct *p);
1719extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1720 int src_nid, int dst_cpu);
1721#else
1722static inline void task_numa_fault(int last_node, int node, int pages,
1723 int flags)
1724{
1725}
1726static inline pid_t task_numa_group_id(struct task_struct *p)
1727{
1728 return 0;
1729}
1730static inline void set_numabalancing_state(bool enabled)
1731{
1732}
1733static inline void task_numa_free(struct task_struct *p)
1734{
1735}
1736static inline bool should_numa_migrate_memory(struct task_struct *p,
1737 struct page *page, int src_nid, int dst_cpu)
1738{
1739 return true;
1740}
1741#endif
1742
1743static inline struct pid *task_pid(struct task_struct *task)
1744{
1745 return task->pids[PIDTYPE_PID].pid;
1746}
1747
1748static inline struct pid *task_tgid(struct task_struct *task)
1749{
1750 return task->group_leader->pids[PIDTYPE_PID].pid;
1751}
1752
1753
1754
1755
1756
1757
1758static inline struct pid *task_pgrp(struct task_struct *task)
1759{
1760 return task->group_leader->pids[PIDTYPE_PGID].pid;
1761}
1762
1763static inline struct pid *task_session(struct task_struct *task)
1764{
1765 return task->group_leader->pids[PIDTYPE_SID].pid;
1766}
1767
1768struct pid_namespace;
1769
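/*
 * Helpers to obtain a task's ids as seen from different pid namespaces:
 *   task_xid_nr()      global id, i.e. as seen from init_pid_ns
 *   task_xid_vnr()     id as seen from current's pid namespace
 *   task_xid_nr_ns()   id as seen from the specified namespace
 */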
1783pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1784 struct pid_namespace *ns);
1785
1786static inline pid_t task_pid_nr(struct task_struct *tsk)
1787{
1788 return tsk->pid;
1789}
1790
1791static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1792 struct pid_namespace *ns)
1793{
1794 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1795}
1796
1797static inline pid_t task_pid_vnr(struct task_struct *tsk)
1798{
1799 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1800}
1801
1802
1803static inline pid_t task_tgid_nr(struct task_struct *tsk)
1804{
1805 return tsk->tgid;
1806}
1807
1808pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1809
1810static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1811{
1812 return pid_vnr(task_tgid(tsk));
1813}
1814
1815
1816static inline int pid_alive(const struct task_struct *p);
1817static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1818{
1819 pid_t pid = 0;
1820
1821 rcu_read_lock();
1822 if (pid_alive(tsk))
1823 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1824 rcu_read_unlock();
1825
1826 return pid;
1827}
1828
1829static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1830{
1831 return task_ppid_nr_ns(tsk, &init_pid_ns);
1832}
1833
1834static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1835 struct pid_namespace *ns)
1836{
1837 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1838}
1839
1840static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1841{
1842 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1843}
1844
1845
1846static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1847 struct pid_namespace *ns)
1848{
1849 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1850}
1851
1852static inline pid_t task_session_vnr(struct task_struct *tsk)
1853{
1854 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1855}
1856
1857
1858static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1859{
1860 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1861}
1862
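/* pid_alive() - true while the task's pid links are still attached; once it
 * returns false, pid-related pointers in the task may be stale. */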
1873static inline int pid_alive(const struct task_struct *p)
1874{
1875 return p->pids[PIDTYPE_PID].pid != NULL;
1876}
1877
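/* is_global_init() - is this the first user-space task (global pid 1)? */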
1886static inline int is_global_init(struct task_struct *tsk)
1887{
1888 return tsk->pid == 1;
1889}
1890
1891extern struct pid *cad_pid;
1892
1893extern void free_task(struct task_struct *tsk);
1894#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1895
1896extern void __put_task_struct(struct task_struct *t);
1897
1898static inline void put_task_struct(struct task_struct *t)
1899{
1900 if (atomic_dec_and_test(&t->usage))
1901 __put_task_struct(t);
1902}
1903
1904#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1905extern void task_cputime(struct task_struct *t,
1906 cputime_t *utime, cputime_t *stime);
1907extern void task_cputime_scaled(struct task_struct *t,
1908 cputime_t *utimescaled, cputime_t *stimescaled);
1909extern cputime_t task_gtime(struct task_struct *t);
1910#else
1911static inline void task_cputime(struct task_struct *t,
1912 cputime_t *utime, cputime_t *stime)
1913{
1914 if (utime)
1915 *utime = t->utime;
1916 if (stime)
1917 *stime = t->stime;
1918}
1919
1920static inline void task_cputime_scaled(struct task_struct *t,
1921 cputime_t *utimescaled,
1922 cputime_t *stimescaled)
1923{
1924 if (utimescaled)
1925 *utimescaled = t->utimescaled;
1926 if (stimescaled)
1927 *stimescaled = t->stimescaled;
1928}
1929
1930static inline cputime_t task_gtime(struct task_struct *t)
1931{
1932 return t->gtime;
1933}
1934#endif
1935extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1936extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1937
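/* Per-process flags, kept in task_struct::flags. */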
1941#define PF_EXITING 0x00000004
1942#define PF_EXITPIDONE 0x00000008
1943#define PF_VCPU 0x00000010
1944#define PF_WQ_WORKER 0x00000020
1945#define PF_FORKNOEXEC 0x00000040
1946#define PF_MCE_PROCESS 0x00000080
1947#define PF_SUPERPRIV 0x00000100
1948#define PF_DUMPCORE 0x00000200
1949#define PF_SIGNALED 0x00000400
1950#define PF_MEMALLOC 0x00000800
1951#define PF_NPROC_EXCEEDED 0x00001000
1952#define PF_USED_MATH 0x00002000
1953#define PF_USED_ASYNC 0x00004000
1954#define PF_NOFREEZE 0x00008000
1955#define PF_FROZEN 0x00010000
1956#define PF_FSTRANS 0x00020000
1957#define PF_KSWAPD 0x00040000
1958#define PF_MEMALLOC_NOIO 0x00080000
1959#define PF_LESS_THROTTLE 0x00100000
1960#define PF_KTHREAD 0x00200000
1961#define PF_RANDOMIZE 0x00400000
1962#define PF_SWAPWRITE 0x00800000
1963#define PF_NO_SETAFFINITY 0x04000000
1964#define PF_MCE_EARLY 0x08000000
1965#define PF_MUTEX_TESTER 0x20000000
1966#define PF_FREEZER_SKIP 0x40000000
1967#define PF_SUSPEND_TASK 0x80000000
1968
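/* Only current may modify its own ->flags; the *_stopped_child_* variants
 * below may only be used on tasks that are known to be stopped. */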
1980#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1981#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1982#define clear_used_math() clear_stopped_child_used_math(current)
1983#define set_used_math() set_stopped_child_used_math(current)
1984#define conditional_stopped_child_used_math(condition, child) \
1985 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1986#define conditional_used_math(condition) \
1987 conditional_stopped_child_used_math(condition, current)
1988#define copy_to_stopped_child_used_math(child) \
1989 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1990
1991#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1992#define used_math() tsk_used_math(current)
1993
1994
1995
1996
1997static inline gfp_t memalloc_noio_flags(gfp_t flags)
1998{
1999 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2000 flags &= ~(__GFP_IO | __GFP_FS);
2001 return flags;
2002}
2003
2004static inline unsigned int memalloc_noio_save(void)
2005{
2006 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2007 current->flags |= PF_MEMALLOC_NOIO;
2008 return flags;
2009}
2010
2011static inline void memalloc_noio_restore(unsigned int flags)
2012{
2013 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2014}
2015
2016
2017#define PFA_NO_NEW_PRIVS 0
2018#define PFA_SPREAD_PAGE 1
2019#define PFA_SPREAD_SLAB 2
2020
2021
2022#define TASK_PFA_TEST(name, func) \
2023 static inline bool task_##func(struct task_struct *p) \
2024 { return test_bit(PFA_##name, &p->atomic_flags); }
2025#define TASK_PFA_SET(name, func) \
2026 static inline void task_set_##func(struct task_struct *p) \
2027 { set_bit(PFA_##name, &p->atomic_flags); }
2028#define TASK_PFA_CLEAR(name, func) \
2029 static inline void task_clear_##func(struct task_struct *p) \
2030 { clear_bit(PFA_##name, &p->atomic_flags); }
2031
2032TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2033TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2034
2035TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2036TASK_PFA_SET(SPREAD_PAGE, spread_page)
2037TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2038
2039TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2040TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2041TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2042
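/* Job-control state, kept in task_struct::jobctl; the low 16 bits hold the
 * signal number of the pending group stop. */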
2046#define JOBCTL_STOP_SIGMASK 0xffff
2047
2048#define JOBCTL_STOP_DEQUEUED_BIT 16
2049#define JOBCTL_STOP_PENDING_BIT 17
2050#define JOBCTL_STOP_CONSUME_BIT 18
2051#define JOBCTL_TRAP_STOP_BIT 19
2052#define JOBCTL_TRAP_NOTIFY_BIT 20
2053#define JOBCTL_TRAPPING_BIT 21
2054#define JOBCTL_LISTENING_BIT 22
2055
2056#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
2057#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
2058#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
2059#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
2060#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
2061#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
2062#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
2063
2064#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2065#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2066
2067extern bool task_set_jobctl_pending(struct task_struct *task,
2068 unsigned int mask);
2069extern void task_clear_jobctl_trapping(struct task_struct *task);
2070extern void task_clear_jobctl_pending(struct task_struct *task,
2071 unsigned int mask);
2072
2073static inline void rcu_copy_process(struct task_struct *p)
2074{
2075#ifdef CONFIG_PREEMPT_RCU
2076 p->rcu_read_lock_nesting = 0;
2077 p->rcu_read_unlock_special.s = 0;
2078 p->rcu_blocked_node = NULL;
2079 INIT_LIST_HEAD(&p->rcu_node_entry);
2080#endif
2081#ifdef CONFIG_TASKS_RCU
2082 p->rcu_tasks_holdout = false;
2083 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2084 p->rcu_tasks_idle_cpu = -1;
2085#endif
2086}
2087
2088static inline void tsk_restore_flags(struct task_struct *task,
2089 unsigned long orig_flags, unsigned long flags)
2090{
2091 task->flags &= ~flags;
2092 task->flags |= orig_flags & flags;
2093}
2094
2095extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2096 const struct cpumask *trial);
2097extern int task_can_attach(struct task_struct *p,
2098 const struct cpumask *cs_cpus_allowed);
2099#ifdef CONFIG_SMP
2100extern void do_set_cpus_allowed(struct task_struct *p,
2101 const struct cpumask *new_mask);
2102
2103extern int set_cpus_allowed_ptr(struct task_struct *p,
2104 const struct cpumask *new_mask);
2105#else
2106static inline void do_set_cpus_allowed(struct task_struct *p,
2107 const struct cpumask *new_mask)
2108{
2109}
2110static inline int set_cpus_allowed_ptr(struct task_struct *p,
2111 const struct cpumask *new_mask)
2112{
2113 if (!cpumask_test_cpu(0, new_mask))
2114 return -EINVAL;
2115 return 0;
2116}
2117#endif
2118
2119#ifdef CONFIG_NO_HZ_COMMON
2120void calc_load_enter_idle(void);
2121void calc_load_exit_idle(void);
2122#else
2123static inline void calc_load_enter_idle(void) { }
2124static inline void calc_load_exit_idle(void) { }
2125#endif
2126
2127#ifndef CONFIG_CPUMASK_OFFSTACK
2128static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2129{
2130 return set_cpus_allowed_ptr(p, &new_mask);
2131}
2132#endif
2133
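/* Raw sched_clock(): fast, but with no cross-CPU ordering or drift bounds;
 * prefer cpu_clock()/local_clock()/sched_clock_cpu() below. */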
2142extern unsigned long long notrace sched_clock(void);
2143
2144
2145
2146extern u64 cpu_clock(int cpu);
2147extern u64 local_clock(void);
2148extern u64 sched_clock_cpu(int cpu);
2149
2150
2151extern void sched_clock_init(void);
2152
2153#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2154static inline void sched_clock_tick(void)
2155{
2156}
2157
2158static inline void sched_clock_idle_sleep_event(void)
2159{
2160}
2161
2162static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2163{
2164}
2165#else
2166
2167
2168
2169
2170
2171
2172extern int sched_clock_stable(void);
2173extern void set_sched_clock_stable(void);
2174extern void clear_sched_clock_stable(void);
2175
2176extern void sched_clock_tick(void);
2177extern void sched_clock_idle_sleep_event(void);
2178extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2179#endif
2180
2181#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2182
2183
2184
2185
2186
2187extern void enable_sched_clock_irqtime(void);
2188extern void disable_sched_clock_irqtime(void);
2189#else
2190static inline void enable_sched_clock_irqtime(void) {}
2191static inline void disable_sched_clock_irqtime(void) {}
2192#endif
2193
2194extern unsigned long long
2195task_sched_runtime(struct task_struct *task);
2196
2197
2198#ifdef CONFIG_SMP
2199extern void sched_exec(void);
2200#else
2201#define sched_exec() {}
2202#endif
2203
2204extern void sched_clock_idle_sleep_event(void);
2205extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2206
2207#ifdef CONFIG_HOTPLUG_CPU
2208extern void idle_task_exit(void);
2209#else
2210static inline void idle_task_exit(void) {}
2211#endif
2212
2213#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2214extern void wake_up_nohz_cpu(int cpu);
2215#else
2216static inline void wake_up_nohz_cpu(int cpu) { }
2217#endif
2218
2219#ifdef CONFIG_NO_HZ_FULL
2220extern bool sched_can_stop_tick(void);
2221extern u64 scheduler_tick_max_deferment(void);
2222#else
2223static inline bool sched_can_stop_tick(void) { return false; }
2224#endif
2225
2226#ifdef CONFIG_SCHED_AUTOGROUP
2227extern void sched_autogroup_create_attach(struct task_struct *p);
2228extern void sched_autogroup_detach(struct task_struct *p);
2229extern void sched_autogroup_fork(struct signal_struct *sig);
2230extern void sched_autogroup_exit(struct signal_struct *sig);
2231#ifdef CONFIG_PROC_FS
2232extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2233extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2234#endif
2235#else
2236static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2237static inline void sched_autogroup_detach(struct task_struct *p) { }
2238static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2239static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2240#endif
2241
2242extern int yield_to(struct task_struct *p, bool preempt);
2243extern void set_user_nice(struct task_struct *p, long nice);
2244extern int task_prio(const struct task_struct *p);
2245
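/* task_nice() - nice value of @p, in the range [-20, 19]. */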
2251static inline int task_nice(const struct task_struct *p)
2252{
2253 return PRIO_TO_NICE((p)->static_prio);
2254}
2255extern int can_nice(const struct task_struct *p, const int nice);
2256extern int task_curr(const struct task_struct *p);
2257extern int idle_cpu(int cpu);
2258extern int sched_setscheduler(struct task_struct *, int,
2259 const struct sched_param *);
2260extern int sched_setscheduler_nocheck(struct task_struct *, int,
2261 const struct sched_param *);
2262extern int sched_setattr(struct task_struct *,
2263 const struct sched_attr *);
2264extern struct task_struct *idle_task(int cpu);
2265
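/* is_idle_task() - true for the per-CPU idle ("swapper") tasks, which run
 * with pid 0. */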
2271static inline bool is_idle_task(const struct task_struct *p)
2272{
2273 return p->pid == 0;
2274}
2275extern struct task_struct *curr_task(int cpu);
2276extern void set_curr_task(int cpu, struct task_struct *p);
2277
2278void yield(void);
2279
2280
2281
2282
2283extern struct exec_domain default_exec_domain;
2284
2285union thread_union {
2286 struct thread_info thread_info;
2287 unsigned long stack[THREAD_SIZE/sizeof(long)];
2288};
2289
2290#ifndef __HAVE_ARCH_KSTACK_END
2291static inline int kstack_end(void *addr)
2292{
2293
2294
2295
2296 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2297}
2298#endif
2299
2300extern union thread_union init_thread_union;
2301extern struct task_struct init_task;
2302
2303extern struct mm_struct init_mm;
2304
2305extern struct pid_namespace init_pid_ns;
2306
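/*
 * Look a task up by numeric pid; callers must hold rcu_read_lock() or
 * tasklist_lock.  find_task_by_vpid() resolves the id in current's pid
 * namespace, find_task_by_pid_ns() in the namespace given.
 */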
2318extern struct task_struct *find_task_by_vpid(pid_t nr);
2319extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2320 struct pid_namespace *ns);
2321
2322
2323extern struct user_struct * alloc_uid(kuid_t);
2324static inline struct user_struct *get_uid(struct user_struct *u)
2325{
2326 atomic_inc(&u->__count);
2327 return u;
2328}
2329extern void free_uid(struct user_struct *);
2330
2331#include <asm/current.h>
2332
2333extern void xtime_update(unsigned long ticks);
2334
2335extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2336extern int wake_up_process(struct task_struct *tsk);
2337extern void wake_up_new_task(struct task_struct *tsk);
2338#ifdef CONFIG_SMP
2339 extern void kick_process(struct task_struct *tsk);
2340#else
2341 static inline void kick_process(struct task_struct *tsk) { }
2342#endif
2343extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2344extern void sched_dead(struct task_struct *p);
2345
2346extern void proc_caches_init(void);
2347extern void flush_signals(struct task_struct *);
2348extern void __flush_signals(struct task_struct *);
2349extern void ignore_signals(struct task_struct *);
2350extern void flush_signal_handlers(struct task_struct *, int force_default);
2351extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2352
2353static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2354{
2355 unsigned long flags;
2356 int ret;
2357
2358 spin_lock_irqsave(&tsk->sighand->siglock, flags);
2359 ret = dequeue_signal(tsk, mask, info);
2360 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2361
2362 return ret;
2363}
2364
2365extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2366 sigset_t *mask);
2367extern void unblock_all_signals(void);
2368extern void release_task(struct task_struct * p);
2369extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2370extern int force_sigsegv(int, struct task_struct *);
2371extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2372extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2373extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2374extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2375 const struct cred *, u32);
2376extern int kill_pgrp(struct pid *pid, int sig, int priv);
2377extern int kill_pid(struct pid *pid, int sig, int priv);
2378extern int kill_proc_info(int, struct siginfo *, pid_t);
2379extern __must_check bool do_notify_parent(struct task_struct *, int);
2380extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2381extern void force_sig(int, struct task_struct *);
2382extern int send_sig(int, struct task_struct *, int);
2383extern int zap_other_threads(struct task_struct *p);
2384extern struct sigqueue *sigqueue_alloc(void);
2385extern void sigqueue_free(struct sigqueue *);
2386extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2387extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2388
2389static inline void restore_saved_sigmask(void)
2390{
2391 if (test_and_clear_restore_sigmask())
2392 __set_current_blocked(&current->saved_sigmask);
2393}
2394
2395static inline sigset_t *sigmask_to_save(void)
2396{
2397 sigset_t *res = &current->blocked;
2398 if (unlikely(test_restore_sigmask()))
2399 res = &current->saved_sigmask;
2400 return res;
2401}
2402
2403static inline int kill_cad_pid(int sig, int priv)
2404{
2405 return kill_pid(cad_pid, sig, priv);
2406}
2407
2408
2409#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2410#define SEND_SIG_PRIV ((struct siginfo *) 1)
2411#define SEND_SIG_FORCED ((struct siginfo *) 2)
2412
2413
2414
2415
2416static inline int on_sig_stack(unsigned long sp)
2417{
2418#ifdef CONFIG_STACK_GROWSUP
2419 return sp >= current->sas_ss_sp &&
2420 sp - current->sas_ss_sp < current->sas_ss_size;
2421#else
2422 return sp > current->sas_ss_sp &&
2423 sp - current->sas_ss_sp <= current->sas_ss_size;
2424#endif
2425}
2426
2427static inline int sas_ss_flags(unsigned long sp)
2428{
2429 if (!current->sas_ss_size)
2430 return SS_DISABLE;
2431
2432 return on_sig_stack(sp) ? SS_ONSTACK : 0;
2433}
2434
2435static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2436{
2437 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2438#ifdef CONFIG_STACK_GROWSUP
2439 return current->sas_ss_sp;
2440#else
2441 return current->sas_ss_sp + current->sas_ss_size;
2442#endif
2443 return sp;
2444}
2445
2446
2447
2448
2449extern struct mm_struct * mm_alloc(void);
2450
2451
2452extern void __mmdrop(struct mm_struct *);
2453static inline void mmdrop(struct mm_struct * mm)
2454{
2455 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2456 __mmdrop(mm);
2457}
2458
2459
2460extern void mmput(struct mm_struct *);
2461
2462extern struct mm_struct *get_task_mm(struct task_struct *task);

/*
 * Grab a reference to a task's mm, if it is not already going away and
 * ptrace_may_access() with the passed mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern int do_execve(struct filename *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern int do_execveat(int, struct filename *,
		       const char __user * const __user *,
		       const char __user * const __user *,
		       int);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
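
/*
 * Illustrative sketch, assuming TASK_COMM_LEN (defined earlier in this
 * header) as the buffer size:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_debug("task %d is running %s\n", task_pid_nr(tsk), comm);
 */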

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
	for_each_process(p) for_each_thread(p, t)
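
/*
 * Illustrative sketch: the task list is RCU-protected, so a full walk over
 * every thread in the system looks like this (touch_one_task() is a
 * hypothetical per-thread hook):
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		touch_one_task(t);
 *	rcu_read_unlock();
 */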

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/*
 * True iff this task carries the thread-group leader's pid: normally the
 * group leader itself, transiently also an exec'ing thread that has taken
 * over that pid in de_thread().
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * task_lock() takes ->alloc_lock and protects the task's slow-path
 * fields: ->fs, ->files, ->mm, ->group_info and ->comm among others.
 * It nests both inside and outside of read_lock(&tasklist_lock).
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
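
/*
 * Illustrative sketch: task_lock() is what stabilises fields such as ->comm
 * while they are read from another context:
 *
 *	task_lock(p);
 *	... p->comm and p->fs are stable here ...
 *	task_unlock(p);
 */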

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
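
/*
 * Illustrative sketch: the sighand structure can go away if the task exits,
 * so callers must check the return value before touching signal state:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		... tsk->signal is stable here ...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */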

#ifdef CONFIG_CGROUPS
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}

/*
 * threadgroup_lock() holds the whole thread group stable: while the write
 * side of ->signal->group_rwsem is held, no new task can join the group
 * and no member can exit or change its group leader.  The fork and exit
 * paths take the read side via threadgroup_change_begin()/_end().
 */
static inline void threadgroup_lock(struct task_struct *tsk)
{
	down_write(&tsk->signal->group_rwsem);
}

/* Release the write side taken by threadgroup_lock(). */
static inline void threadgroup_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->group_rwsem);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
static inline void threadgroup_change_end(struct task_struct *tsk) {}
static inline void threadgroup_lock(struct task_struct *tsk) {}
static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread_info struct;
 * going any lower would corrupt it.  When the stack grows up, this is the
 * highest usable address before the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)
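
/*
 * Illustrative sketch of how the check above can be used (the core
 * scheduler performs a similar sanity check on every context switch):
 *
 *	if (task_stack_end_corrupted(prev))
 *		panic("corrupted stack end detected inside scheduler\n");
 */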

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);

/*
 * Set thread flags in another task's structures.
 * See asm/thread_info.h for the TIF_xxxx flags available.
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
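
/*
 * Illustrative sketch: a typical interruptible wait loop that gives up when
 * a signal is pending (the condition and the wakeup source are
 * hypothetical):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */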

/*
 * cond_resched() and cond_resched_lock(): latency reduction via explicit
 * rescheduling in places that are safe.  The return value indicates
 * whether a reschedule was actually done.  cond_resched_lock() drops the
 * spinlock before scheduling; cond_resched_softirq() re-enables bottom
 * halves before scheduling.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
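
/*
 * Illustrative sketch: breaking up a long-running loop so that other tasks
 * get a chance to run (the list and the per-item work are hypothetical):
 *
 *	list_for_each_entry(item, &big_list, node) {
 *		process_item(item);
 *		cond_resched();
 *	}
 */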

/*
 * Does a critical section need to be broken due to another task waiting?
 * This does not strictly depend on CONFIG_PREEMPT, but reflects the
 * general need for low latency.
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/*
 * Idle-thread helpers for managing the need_resched polling state
 * (TIF_POLLING_NRFLAG).
 */
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * The polling state must be visible before we test NEED_RESCHED;
	 * paired with the barrier in resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * The polling state must be visible before we test NEED_RESCHED;
	 * paired with the barrier in resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure TIF_NEED_RESCHED is checked after the polling bit has been
	 * cleared; once it is cleared, every new TIF_NEED_RESCHED arrives
	 * with an IPI and scheduler_ipi() folds the need_resched state.
	 */
	smp_mb();

	preempt_fold_need_resched();
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}
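
/*
 * Illustrative sketch: a simple polling idle-style loop that spins until a
 * reschedule is requested and then yields the CPU:
 *
 *	while (!need_resched())
 *		cpu_relax();
 *	schedule();
 */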

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	raw_spin_lock_init(&sig->cputimer.lock);
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access.  No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
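
/*
 * Illustrative sketch: pinning the calling task to one CPU from kernel code
 * (for these in-kernel helpers a pid of 0 selects the current task):
 *
 *	long ret = sched_setaffinity(0, cpumask_of(cpu));
 *	if (ret)
 *		pr_warn("could not pin to CPU %d: %ld\n", cpu, ret);
 */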

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif /* CONFIG_TASK_XACCT */
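
/*
 * Illustrative sketch: how a read path might account a completed transfer
 * (@ret is the hypothetical number of bytes just read):
 *
 *	if (ret > 0)
 *		add_rchar(current, ret);
 *	inc_syscr(current);
 */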

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
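
/*
 * Illustrative sketch: comparing a requested mapping size against the
 * caller's address-space limit:
 *
 *	if (len > rlimit(RLIMIT_AS))
 *		return -ENOMEM;
 */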

#endif /* _LINUX_SCHED_H */