1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5
6#include <linux/sched/prio.h>
7
8
9struct sched_param {
10 int sched_priority;
11};
12
13#include <asm/param.h>
14
15#include <linux/capability.h>
16#include <linux/threads.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/timex.h>
20#include <linux/jiffies.h>
21#include <linux/plist.h>
22#include <linux/rbtree.h>
23#include <linux/thread_info.h>
24#include <linux/cpumask.h>
25#include <linux/errno.h>
26#include <linux/nodemask.h>
27#include <linux/mm_types.h>
28#include <linux/preempt.h>
29
30#include <asm/page.h>
31#include <asm/ptrace.h>
32#include <linux/cputime.h>
33
34#include <linux/smp.h>
35#include <linux/sem.h>
36#include <linux/shm.h>
37#include <linux/signal.h>
38#include <linux/compiler.h>
39#include <linux/completion.h>
40#include <linux/pid.h>
41#include <linux/percpu.h>
42#include <linux/topology.h>
43#include <linux/seccomp.h>
44#include <linux/rcupdate.h>
45#include <linux/rculist.h>
46#include <linux/rtmutex.h>
47
48#include <linux/time.h>
49#include <linux/param.h>
50#include <linux/resource.h>
51#include <linux/timer.h>
52#include <linux/hrtimer.h>
53#include <linux/kcov.h>
54#include <linux/task_io_accounting.h>
55#include <linux/latencytop.h>
56#include <linux/cred.h>
57#include <linux/llist.h>
58#include <linux/uidgid.h>
59#include <linux/gfp.h>
60#include <linux/magic.h>
61#include <linux/cgroup-defs.h>
62
63#include <asm/processor.h>
64
65#define SCHED_ATTR_SIZE_VER0 48
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
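/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param can not be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()), and because it allows specifying more
 * than just a priority, e.g., the three parameters used by
 * SCHED_DEADLINE (runtime, deadline and period).
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 */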
111struct sched_attr {
112 u32 size;
113
114 u32 sched_policy;
115 u64 sched_flags;
116
117
118 s32 sched_nice;
119
120
121 u32 sched_priority;
122
123
124 u64 sched_runtime;
125 u64 sched_deadline;
126 u64 sched_period;
127};
128
129struct futex_pi_state;
130struct robust_list_head;
131struct bio_list;
132struct fs_struct;
133struct perf_event_context;
134struct blk_plug;
135struct filename;
136struct nameidata;
137
138#define VMACACHE_BITS 2
139#define VMACACHE_SIZE (1U << VMACACHE_BITS)
140#define VMACACHE_MASK (VMACACHE_SIZE - 1)
141
142
143
144
145
146
147
148
149
150
151
152extern unsigned long avenrun[];
153extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
154
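/*
 * Constants for the fixed-point load-average calculation: FSHIFT bits of
 * precision, FIXED_1 is 1.0 in fixed point, samples are taken every
 * LOAD_FREQ ticks, and EXP_1/EXP_5/EXP_15 are 1/exp(5sec/1min),
 * 1/exp(5sec/5min) and 1/exp(5sec/15min) as fixed-point decay factors
 * for the 1, 5 and 15 minute averages.
 */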
155#define FSHIFT 11
156#define FIXED_1 (1<<FSHIFT)
157#define LOAD_FREQ (5*HZ+1)
158#define EXP_1 1884
159#define EXP_5 2014
160#define EXP_15 2037
161
162#define CALC_LOAD(load,exp,n) \
163 load *= exp; \
164 load += n*(FIXED_1-exp); \
165 load >>= FSHIFT;
166
167extern unsigned long total_forks;
168extern int nr_threads;
169DECLARE_PER_CPU(unsigned long, process_counts);
170extern int nr_processes(void);
171extern unsigned long nr_running(void);
172extern bool single_task_running(void);
173extern unsigned long nr_iowait(void);
174extern unsigned long nr_iowait_cpu(int cpu);
175extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
176
177extern void calc_global_load(unsigned long ticks);
178
179#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
180extern void cpu_load_update_nohz_start(void);
181extern void cpu_load_update_nohz_stop(void);
182#else
183static inline void cpu_load_update_nohz_start(void) { }
184static inline void cpu_load_update_nohz_stop(void) { }
185#endif
186
187extern void dump_cpu_task(int cpu);
188
189struct seq_file;
190struct cfs_rq;
191struct task_group;
192#ifdef CONFIG_SCHED_DEBUG
193extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
194extern void proc_sched_set_task(struct task_struct *p);
195#endif
196
197
198
199
200
201
202
203
204
205
206
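/*
 * Task state bitmask.  NOTE!  These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting.  Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */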
207#define TASK_RUNNING 0
208#define TASK_INTERRUPTIBLE 1
209#define TASK_UNINTERRUPTIBLE 2
210#define __TASK_STOPPED 4
211#define __TASK_TRACED 8
212
213#define EXIT_DEAD 16
214#define EXIT_ZOMBIE 32
215#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
216
217#define TASK_DEAD 64
218#define TASK_WAKEKILL 128
219#define TASK_WAKING 256
220#define TASK_PARKED 512
221#define TASK_NOLOAD 1024
222#define TASK_STATE_MAX 2048
223
224#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
225
226extern char ___assert_task_state[1 - 2*!!(
227 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
228
229
230#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
231#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
232#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
233
234#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
235
236
237#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
238#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
239
240
241#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
242 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
243 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
244
245#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
246#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
247#define task_is_stopped_or_traced(task) \
248 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
249#define task_contributes_to_load(task) \
250 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
251 (task->flags & PF_FROZEN) == 0 && \
252 (task->state & TASK_NOLOAD) == 0)
253
254#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
255
256#define __set_task_state(tsk, state_value) \
257 do { \
258 (tsk)->task_state_change = _THIS_IP_; \
259 (tsk)->state = (state_value); \
260 } while (0)
261#define set_task_state(tsk, state_value) \
262 do { \
263 (tsk)->task_state_change = _THIS_IP_; \
264 smp_store_mb((tsk)->state, (state_value)); \
265 } while (0)
266
267
268
269
270
271
272
273
274
275
276
277
278#define __set_current_state(state_value) \
279 do { \
280 current->task_state_change = _THIS_IP_; \
281 current->state = (state_value); \
282 } while (0)
283#define set_current_state(state_value) \
284 do { \
285 current->task_state_change = _THIS_IP_; \
286 smp_store_mb(current->state, (state_value)); \
287 } while (0)
288
289#else
290
291#define __set_task_state(tsk, state_value) \
292 do { (tsk)->state = (state_value); } while (0)
293#define set_task_state(tsk, state_value) \
294 smp_store_mb((tsk)->state, (state_value))
295
296
297
298
299
300
301
302
303
304
305
306
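/*
 * set_current_state() includes a barrier so that the write of
 * current->state is correctly serialised wrt the caller's subsequent
 * test of whether to actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use
 * __set_current_state().
 */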
307#define __set_current_state(state_value) \
308 do { current->state = (state_value); } while (0)
309#define set_current_state(state_value) \
310 smp_store_mb(current->state, (state_value))
311
312#endif
313
314
315#define TASK_COMM_LEN 16
316
317#include <linux/spinlock.h>
318
319
320
321
322
323
324
325extern rwlock_t tasklist_lock;
326extern spinlock_t mmlist_lock;
327
328struct task_struct;
329
330#ifdef CONFIG_PROVE_RCU
331extern int lockdep_tasklist_lock_is_held(void);
332#endif
333
334extern void sched_init(void);
335extern void sched_init_smp(void);
336extern asmlinkage void schedule_tail(struct task_struct *prev);
337extern void init_idle(struct task_struct *idle, int cpu);
338extern void init_idle_bootup_task(struct task_struct *idle);
339
340extern cpumask_var_t cpu_isolated_map;
341
342extern int runqueue_is_locked(int cpu);
343
344#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
345extern void nohz_balance_enter_idle(int cpu);
346extern void set_cpu_sd_state_idle(void);
347extern int get_nohz_timer_target(void);
348#else
349static inline void nohz_balance_enter_idle(int cpu) { }
350static inline void set_cpu_sd_state_idle(void) { }
351#endif
352
353
354
355
356extern void show_state_filter(unsigned long state_filter);
357
358static inline void show_state(void)
359{
360 show_state_filter(0);
361}
362
363extern void show_regs(struct pt_regs *);
364
365
366
367
368
369
370extern void show_stack(struct task_struct *task, unsigned long *sp);
371
372extern void cpu_init (void);
373extern void trap_init(void);
374extern void update_process_times(int user);
375extern void scheduler_tick(void);
376extern int sched_cpu_starting(unsigned int cpu);
377extern int sched_cpu_activate(unsigned int cpu);
378extern int sched_cpu_deactivate(unsigned int cpu);
379
380#ifdef CONFIG_HOTPLUG_CPU
381extern int sched_cpu_dying(unsigned int cpu);
382#else
383# define sched_cpu_dying NULL
384#endif
385
386extern void sched_show_task(struct task_struct *p);
387
388#ifdef CONFIG_LOCKUP_DETECTOR
389extern void touch_softlockup_watchdog_sched(void);
390extern void touch_softlockup_watchdog(void);
391extern void touch_softlockup_watchdog_sync(void);
392extern void touch_all_softlockup_watchdogs(void);
393extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
394 void __user *buffer,
395 size_t *lenp, loff_t *ppos);
396extern unsigned int softlockup_panic;
397extern unsigned int hardlockup_panic;
398void lockup_detector_init(void);
399#else
400static inline void touch_softlockup_watchdog_sched(void)
401{
402}
403static inline void touch_softlockup_watchdog(void)
404{
405}
406static inline void touch_softlockup_watchdog_sync(void)
407{
408}
409static inline void touch_all_softlockup_watchdogs(void)
410{
411}
412static inline void lockup_detector_init(void)
413{
414}
415#endif
416
417#ifdef CONFIG_DETECT_HUNG_TASK
418void reset_hung_task_detector(void);
419#else
420static inline void reset_hung_task_detector(void)
421{
422}
423#endif
424
425
426#define __sched __attribute__((__section__(".sched.text")))
427
428
429extern char __sched_text_start[], __sched_text_end[];
430
431
432extern int in_sched_functions(unsigned long addr);
433
434#define MAX_SCHEDULE_TIMEOUT LONG_MAX
435extern signed long schedule_timeout(signed long timeout);
436extern signed long schedule_timeout_interruptible(signed long timeout);
437extern signed long schedule_timeout_killable(signed long timeout);
438extern signed long schedule_timeout_uninterruptible(signed long timeout);
439extern signed long schedule_timeout_idle(signed long timeout);
440asmlinkage void schedule(void);
441extern void schedule_preempt_disabled(void);
442
443extern long io_schedule_timeout(long timeout);
444
445static inline void io_schedule(void)
446{
447 io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
448}
449
450struct nsproxy;
451struct user_namespace;
452
453#ifdef CONFIG_MMU
454extern void arch_pick_mmap_layout(struct mm_struct *mm);
455extern unsigned long
456arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
457 unsigned long, unsigned long);
458extern unsigned long
459arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
460 unsigned long len, unsigned long pgoff,
461 unsigned long flags);
462#else
463static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
464#endif
465
466#define SUID_DUMP_DISABLE 0
467#define SUID_DUMP_USER 1
468#define SUID_DUMP_ROOT 2
469
470
471
472
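/*
 * The low MMF_DUMPABLE_BITS bits of mm->flags hold the "dumpable" state,
 * i.e. one of the SUID_DUMP_* values above.
 */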
473#define MMF_DUMPABLE_BITS 2
474#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
475
476extern void set_dumpable(struct mm_struct *mm, int value);
477
478
479
480
481
482
483static inline int __get_dumpable(unsigned long mm_flags)
484{
485 return mm_flags & MMF_DUMPABLE_MASK;
486}
487
488static inline int get_dumpable(struct mm_struct *mm)
489{
490 return __get_dumpable(mm->flags);
491}
492
493
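/* coredump filter bits */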
494#define MMF_DUMP_ANON_PRIVATE 2
495#define MMF_DUMP_ANON_SHARED 3
496#define MMF_DUMP_MAPPED_PRIVATE 4
497#define MMF_DUMP_MAPPED_SHARED 5
498#define MMF_DUMP_ELF_HEADERS 6
499#define MMF_DUMP_HUGETLB_PRIVATE 7
500#define MMF_DUMP_HUGETLB_SHARED 8
501#define MMF_DUMP_DAX_PRIVATE 9
502#define MMF_DUMP_DAX_SHARED 10
503
504#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
505#define MMF_DUMP_FILTER_BITS 9
506#define MMF_DUMP_FILTER_MASK \
507 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
508#define MMF_DUMP_FILTER_DEFAULT \
509 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
510 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
511
512#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
513# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
514#else
515# define MMF_DUMP_MASK_DEFAULT_ELF 0
516#endif
517
518#define MMF_VM_MERGEABLE 16
519#define MMF_VM_HUGEPAGE 17
520#define MMF_EXE_FILE_CHANGED 18
521
522#define MMF_HAS_UPROBES 19
523#define MMF_RECALC_UPROBES 20
524#define MMF_OOM_REAPED 21
525
526#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
527
528struct sighand_struct {
529 atomic_t count;
530 struct k_sigaction action[_NSIG];
531 spinlock_t siglock;
532 wait_queue_head_t signalfd_wqh;
533};
534
535struct pacct_struct {
536 int ac_flag;
537 long ac_exitcode;
538 unsigned long ac_mem;
539 cputime_t ac_utime, ac_stime;
540 unsigned long ac_minflt, ac_majflt;
541};
542
543struct cpu_itimer {
544 cputime_t expires;
545 cputime_t incr;
546 u32 error;
547 u32 incr_error;
548};
549
550
551
552
553
554
555
556
557
558
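/**
 * struct prev_cputime - snapshotted raw cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock:  protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */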
559struct prev_cputime {
560#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
561 cputime_t utime;
562 cputime_t stime;
563 raw_spinlock_t lock;
564#endif
565};
566
567static inline void prev_cputime_init(struct prev_cputime *prev)
568{
569#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
570 prev->utime = prev->stime = 0;
571 raw_spin_lock_init(&prev->lock);
572#endif
573}
574
575
576
577
578
579
580
581
582
583
584
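/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked
 * for threads and thread groups.
 */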
585struct task_cputime {
586 cputime_t utime;
587 cputime_t stime;
588 unsigned long long sum_exec_runtime;
589};
590
591
592#define virt_exp utime
593#define prof_exp stime
594#define sched_exp sum_exec_runtime
595
596#define INIT_CPUTIME \
597 (struct task_cputime) { \
598 .utime = 0, \
599 .stime = 0, \
600 .sum_exec_runtime = 0, \
601 }
602
603
604
605
606
607struct task_cputime_atomic {
608 atomic64_t utime;
609 atomic64_t stime;
610 atomic64_t sum_exec_runtime;
611};
612
613#define INIT_CPUTIME_ATOMIC \
614 (struct task_cputime_atomic) { \
615 .utime = ATOMIC64_INIT(0), \
616 .stime = ATOMIC64_INIT(0), \
617 .sum_exec_runtime = ATOMIC64_INIT(0), \
618 }
619
620#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
621
622
623
624
625
626
627
628#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
629
630
631
632
633
634
635
636
637
638
639#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
640
641
642
643
644
645
646
647
648
649
650
651
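/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 */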
652struct thread_group_cputimer {
653 struct task_cputime_atomic cputime_atomic;
654 bool running;
655 bool checking_timer;
656};
657
658#include <linux/rwsem.h>
659struct autogroup;
660
661
662
663
664
665
666
667
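/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */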
668struct signal_struct {
669 atomic_t sigcnt;
670 atomic_t live;
671 int nr_threads;
672 atomic_t oom_victims;
673 struct list_head thread_head;
674
675 wait_queue_head_t wait_chldexit;
676
677
678 struct task_struct *curr_target;
679
680
681 struct sigpending shared_pending;
682
683
684 int group_exit_code;
685
686
687
688
689
690 int notify_count;
691 struct task_struct *group_exit_task;
692
693
694 int group_stop_count;
695 unsigned int flags;
696
697
698
699
700
701
702
703
704
705
706 unsigned int is_child_subreaper:1;
707 unsigned int has_child_subreaper:1;
708
709
710 int posix_timer_id;
711 struct list_head posix_timers;
712
713
714 struct hrtimer real_timer;
715 struct pid *leader_pid;
716 ktime_t it_real_incr;
717
718
719
720
721
722
723 struct cpu_itimer it[2];
724
725
726
727
728
729 struct thread_group_cputimer cputimer;
730
731
732 struct task_cputime cputime_expires;
733
734#ifdef CONFIG_NO_HZ_FULL
735 atomic_t tick_dep_mask;
736#endif
737
738 struct list_head cpu_timers[3];
739
740 struct pid *tty_old_pgrp;
741
742
743 int leader;
744
745 struct tty_struct *tty;
746
747#ifdef CONFIG_SCHED_AUTOGROUP
748 struct autogroup *autogroup;
749#endif
750
751
752
753
754
755
756 seqlock_t stats_lock;
757 cputime_t utime, stime, cutime, cstime;
758 cputime_t gtime;
759 cputime_t cgtime;
760 struct prev_cputime prev_cputime;
761 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
762 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
763 unsigned long inblock, oublock, cinblock, coublock;
764 unsigned long maxrss, cmaxrss;
765 struct task_io_accounting ioac;
766
767
768
769
770
771
772
773 unsigned long long sum_sched_runtime;
774
775
776
777
778
779
780
781
782
783
784 struct rlimit rlim[RLIM_NLIMITS];
785
786#ifdef CONFIG_BSD_PROCESS_ACCT
787 struct pacct_struct pacct;
788#endif
789#ifdef CONFIG_TASKSTATS
790 struct taskstats *stats;
791#endif
792#ifdef CONFIG_AUDIT
793 unsigned audit_tty;
794 struct tty_audit_buf *tty_audit_buf;
795#endif
796
797
798
799
800
801 bool oom_flag_origin;
802 short oom_score_adj;
803 short oom_score_adj_min;
804
805
806 struct mutex cred_guard_mutex;
807
808
809};
810
811
812
813
814#define SIGNAL_STOP_STOPPED 0x00000001
815#define SIGNAL_STOP_CONTINUED 0x00000002
816#define SIGNAL_GROUP_EXIT 0x00000004
817#define SIGNAL_GROUP_COREDUMP 0x00000008
818
819
820
821#define SIGNAL_CLD_STOPPED 0x00000010
822#define SIGNAL_CLD_CONTINUED 0x00000020
823#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
824
825#define SIGNAL_UNKILLABLE 0x00000040
826
827
828static inline int signal_group_exit(const struct signal_struct *sig)
829{
830 return (sig->flags & SIGNAL_GROUP_EXIT) ||
831 (sig->group_exit_task != NULL);
832}
833
834
835
836
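/*
 * Some day this will be a full-fledged user tracking system..
 */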
837struct user_struct {
838 atomic_t __count;
839 atomic_t processes;
840 atomic_t sigpending;
841#ifdef CONFIG_INOTIFY_USER
842 atomic_t inotify_watches;
843 atomic_t inotify_devs;
844#endif
845#ifdef CONFIG_FANOTIFY
846 atomic_t fanotify_listeners;
847#endif
848#ifdef CONFIG_EPOLL
849 atomic_long_t epoll_watches;
850#endif
851#ifdef CONFIG_POSIX_MQUEUE
852
853 unsigned long mq_bytes;
854#endif
855 unsigned long locked_shm;
856 unsigned long unix_inflight;
857 atomic_long_t pipe_bufs;
858
859#ifdef CONFIG_KEYS
860 struct key *uid_keyring;
861 struct key *session_keyring;
862#endif
863
864
865 struct hlist_node uidhash_node;
866 kuid_t uid;
867
868#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
869 atomic_long_t locked_vm;
870#endif
871};
872
873extern int uids_sysfs_init(void);
874
875extern struct user_struct *find_user(kuid_t);
876
877extern struct user_struct root_user;
878#define INIT_USER (&root_user)
879
880
881struct backing_dev_info;
882struct reclaim_state;
883
884#ifdef CONFIG_SCHED_INFO
885struct sched_info {
886
887 unsigned long pcount;
888 unsigned long long run_delay;
889
890
891 unsigned long long last_arrival,
892 last_queued;
893};
894#endif
895
896#ifdef CONFIG_TASK_DELAY_ACCT
897struct task_delay_info {
898 spinlock_t lock;
899 unsigned int flags;
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916 u64 blkio_start;
917 u64 blkio_delay;
918 u64 swapin_delay;
919 u32 blkio_count;
920
921 u32 swapin_count;
922
923
924 u64 freepages_start;
925 u64 freepages_delay;
926 u32 freepages_count;
927};
928#endif
929
930static inline int sched_info_on(void)
931{
932#ifdef CONFIG_SCHEDSTATS
933 return 1;
934#elif defined(CONFIG_TASK_DELAY_ACCT)
935 extern int delayacct_on;
936 return delayacct_on;
937#else
938 return 0;
939#endif
940}
941
942#ifdef CONFIG_SCHEDSTATS
943void force_schedstat_enabled(void);
944#endif
945
946enum cpu_idle_type {
947 CPU_IDLE,
948 CPU_NOT_IDLE,
949 CPU_NEWLY_IDLE,
950 CPU_MAX_IDLE_TYPES
951};
952
953
954
955
956
957
958
959
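/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */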
960# define SCHED_FIXEDPOINT_SHIFT 10
961# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
962
963
964
965
966#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
967#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
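/*
 * Wake-queues are lists of tasks with a pending wakeup, whose
 * callers have already marked the task as woken internally,
 * at which point a wakeup is needed.  The caller typically queues
 * tasks while holding a lock and only later, in a context where no
 * locks are held, actually wakes them all up with wake_up_q().
 *
 * A task can be on at most one wake_q at a time; WAKE_Q_TAIL in its
 * wake_q_node marks the end of the queue.
 */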
995struct wake_q_node {
996 struct wake_q_node *next;
997};
998
999struct wake_q_head {
1000 struct wake_q_node *first;
1001 struct wake_q_node **lastp;
1002};
1003
1004#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
1005
1006#define WAKE_Q(name) \
1007 struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
1008
1009extern void wake_q_add(struct wake_q_head *head,
1010 struct task_struct *task);
1011extern void wake_up_q(struct wake_q_head *head);
1012
1013
1014
1015
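/*
 * sched-domains (multiprocessor balancing) declarations:
 */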
1016#ifdef CONFIG_SMP
1017#define SD_LOAD_BALANCE 0x0001
1018#define SD_BALANCE_NEWIDLE 0x0002
1019#define SD_BALANCE_EXEC 0x0004
1020#define SD_BALANCE_FORK 0x0008
1021#define SD_BALANCE_WAKE 0x0010
1022#define SD_WAKE_AFFINE 0x0020
1023#define SD_SHARE_CPUCAPACITY 0x0080
1024#define SD_SHARE_POWERDOMAIN 0x0100
1025#define SD_SHARE_PKG_RESOURCES 0x0200
1026#define SD_SERIALIZE 0x0400
1027#define SD_ASYM_PACKING 0x0800
1028#define SD_PREFER_SIBLING 0x1000
1029#define SD_OVERLAP 0x2000
1030#define SD_NUMA 0x4000
1031
1032#ifdef CONFIG_SCHED_SMT
1033static inline int cpu_smt_flags(void)
1034{
1035 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1036}
1037#endif
1038
1039#ifdef CONFIG_SCHED_MC
1040static inline int cpu_core_flags(void)
1041{
1042 return SD_SHARE_PKG_RESOURCES;
1043}
1044#endif
1045
1046#ifdef CONFIG_NUMA
1047static inline int cpu_numa_flags(void)
1048{
1049 return SD_NUMA;
1050}
1051#endif
1052
1053struct sched_domain_attr {
1054 int relax_domain_level;
1055};
1056
1057#define SD_ATTR_INIT (struct sched_domain_attr) { \
1058 .relax_domain_level = -1, \
1059}
1060
1061extern int sched_domain_level_max;
1062
1063struct sched_group;
1064
1065struct sched_domain {
1066
1067 struct sched_domain *parent;
1068 struct sched_domain *child;
1069 struct sched_group *groups;
1070 unsigned long min_interval;
1071 unsigned long max_interval;
1072 unsigned int busy_factor;
1073 unsigned int imbalance_pct;
1074 unsigned int cache_nice_tries;
1075 unsigned int busy_idx;
1076 unsigned int idle_idx;
1077 unsigned int newidle_idx;
1078 unsigned int wake_idx;
1079 unsigned int forkexec_idx;
1080 unsigned int smt_gain;
1081
1082 int nohz_idle;
1083 int flags;
1084 int level;
1085
1086
1087 unsigned long last_balance;
1088 unsigned int balance_interval;
1089 unsigned int nr_balance_failed;
1090
1091
1092 u64 max_newidle_lb_cost;
1093 unsigned long next_decay_max_lb_cost;
1094
1095#ifdef CONFIG_SCHEDSTATS
1096
1097 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
1098 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
1099 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
1100 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1101 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1102 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1103 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1104 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
1105
1106
1107 unsigned int alb_count;
1108 unsigned int alb_failed;
1109 unsigned int alb_pushed;
1110
1111
1112 unsigned int sbe_count;
1113 unsigned int sbe_balanced;
1114 unsigned int sbe_pushed;
1115
1116
1117 unsigned int sbf_count;
1118 unsigned int sbf_balanced;
1119 unsigned int sbf_pushed;
1120
1121
1122 unsigned int ttwu_wake_remote;
1123 unsigned int ttwu_move_affine;
1124 unsigned int ttwu_move_balance;
1125#endif
1126#ifdef CONFIG_SCHED_DEBUG
1127 char *name;
1128#endif
1129 union {
1130 void *private;
1131 struct rcu_head rcu;
1132 };
1133
1134 unsigned int span_weight;
1135
1136
1137
1138
1139
1140
1141
1142 unsigned long span[0];
1143};
1144
1145static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1146{
1147 return to_cpumask(sd->span);
1148}
1149
1150extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1151 struct sched_domain_attr *dattr_new);
1152
1153
1154cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1155void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1156
1157bool cpus_share_cache(int this_cpu, int that_cpu);
1158
1159typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1160typedef int (*sched_domain_flags_f)(void);
1161
1162#define SDTL_OVERLAP 0x01
1163
1164struct sd_data {
1165 struct sched_domain **__percpu sd;
1166 struct sched_group **__percpu sg;
1167 struct sched_group_capacity **__percpu sgc;
1168};
1169
1170struct sched_domain_topology_level {
1171 sched_domain_mask_f mask;
1172 sched_domain_flags_f sd_flags;
1173 int flags;
1174 int numa_level;
1175 struct sd_data data;
1176#ifdef CONFIG_SCHED_DEBUG
1177 char *name;
1178#endif
1179};
1180
1181extern void set_sched_topology(struct sched_domain_topology_level *tl);
1182extern void wake_up_if_idle(int cpu);
1183
1184#ifdef CONFIG_SCHED_DEBUG
1185# define SD_INIT_NAME(type) .name = #type
1186#else
1187# define SD_INIT_NAME(type)
1188#endif
1189
1190#else
1191
1192struct sched_domain_attr;
1193
1194static inline void
1195partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1196 struct sched_domain_attr *dattr_new)
1197{
1198}
1199
1200static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1201{
1202 return true;
1203}
1204
1205#endif
1206
1207
1208struct io_context;
1209
1210
1211#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1212extern void prefetch_stack(struct task_struct *t);
1213#else
1214static inline void prefetch_stack(struct task_struct *t) { }
1215#endif
1216
1217struct audit_context;
1218struct mempolicy;
1219struct pipe_inode_info;
1220struct uts_namespace;
1221
1222struct load_weight {
1223 unsigned long weight;
1224 u32 inv_weight;
1225};
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
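/*
 * The load_avg/util_avg accumulate an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c):
 *
 *   load_avg = runnable% * scale_load_down(load)
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where runnable% (running%) is the fraction of time the entity was
 * runnable (running) in the recent, exponentially decayed, past.
 */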
1279struct sched_avg {
1280 u64 last_update_time, load_sum;
1281 u32 util_sum, period_contrib;
1282 unsigned long load_avg, util_avg;
1283};
1284
1285#ifdef CONFIG_SCHEDSTATS
1286struct sched_statistics {
1287 u64 wait_start;
1288 u64 wait_max;
1289 u64 wait_count;
1290 u64 wait_sum;
1291 u64 iowait_count;
1292 u64 iowait_sum;
1293
1294 u64 sleep_start;
1295 u64 sleep_max;
1296 s64 sum_sleep_runtime;
1297
1298 u64 block_start;
1299 u64 block_max;
1300 u64 exec_max;
1301 u64 slice_max;
1302
1303 u64 nr_migrations_cold;
1304 u64 nr_failed_migrations_affine;
1305 u64 nr_failed_migrations_running;
1306 u64 nr_failed_migrations_hot;
1307 u64 nr_forced_migrations;
1308
1309 u64 nr_wakeups;
1310 u64 nr_wakeups_sync;
1311 u64 nr_wakeups_migrate;
1312 u64 nr_wakeups_local;
1313 u64 nr_wakeups_remote;
1314 u64 nr_wakeups_affine;
1315 u64 nr_wakeups_affine_attempts;
1316 u64 nr_wakeups_passive;
1317 u64 nr_wakeups_idle;
1318};
1319#endif
1320
1321struct sched_entity {
1322 struct load_weight load;
1323 struct rb_node run_node;
1324 struct list_head group_node;
1325 unsigned int on_rq;
1326
1327 u64 exec_start;
1328 u64 sum_exec_runtime;
1329 u64 vruntime;
1330 u64 prev_sum_exec_runtime;
1331
1332 u64 nr_migrations;
1333
1334#ifdef CONFIG_SCHEDSTATS
1335 struct sched_statistics statistics;
1336#endif
1337
1338#ifdef CONFIG_FAIR_GROUP_SCHED
1339 int depth;
1340 struct sched_entity *parent;
1341
1342 struct cfs_rq *cfs_rq;
1343
1344 struct cfs_rq *my_q;
1345#endif
1346
1347#ifdef CONFIG_SMP
1348
1349
1350
1351
1352
1353
1354 struct sched_avg avg ____cacheline_aligned_in_smp;
1355#endif
1356};
1357
1358struct sched_rt_entity {
1359 struct list_head run_list;
1360 unsigned long timeout;
1361 unsigned long watchdog_stamp;
1362 unsigned int time_slice;
1363 unsigned short on_rq;
1364 unsigned short on_list;
1365
1366 struct sched_rt_entity *back;
1367#ifdef CONFIG_RT_GROUP_SCHED
1368 struct sched_rt_entity *parent;
1369
1370 struct rt_rq *rt_rq;
1371
1372 struct rt_rq *my_q;
1373#endif
1374};
1375
1376struct sched_dl_entity {
1377 struct rb_node rb_node;
1378
1379
1380
1381
1382
1383
1384 u64 dl_runtime;
1385 u64 dl_deadline;
1386 u64 dl_period;
1387 u64 dl_bw;
1388
1389
1390
1391
1392
1393
1394 s64 runtime;
1395 u64 deadline;
1396 unsigned int flags;
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412 int dl_throttled, dl_boosted, dl_yielded;
1413
1414
1415
1416
1417
1418 struct hrtimer dl_timer;
1419};
1420
1421union rcu_special {
1422 struct {
1423 u8 blocked;
1424 u8 need_qs;
1425 u8 exp_need_qs;
1426 u8 pad;
1427 } b;
1428 u32 s;
1429};
1430struct rcu_node;
1431
1432enum perf_event_task_context {
1433 perf_invalid_context = -1,
1434 perf_hw_context = 0,
1435 perf_sw_context,
1436 perf_nr_task_contexts,
1437};
1438
1439
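/*
 * Used (with CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) to batch TLB
 * flushes issued while unmapping pages during reclaim, so that remote
 * CPUs can be flushed once per batch instead of once per page.
 */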
1440struct tlbflush_unmap_batch {
1441
1442
1443
1444
1445 struct cpumask cpumask;
1446
1447
1448 bool flush_required;
1449
1450
1451
1452
1453
1454
1455 bool writable;
1456};
1457
1458struct task_struct {
1459 volatile long state;
1460 void *stack;
1461 atomic_t usage;
1462 unsigned int flags;
1463 unsigned int ptrace;
1464
1465#ifdef CONFIG_SMP
1466 struct llist_node wake_entry;
1467 int on_cpu;
1468 unsigned int wakee_flips;
1469 unsigned long wakee_flip_decay_ts;
1470 struct task_struct *last_wakee;
1471
1472 int wake_cpu;
1473#endif
1474 int on_rq;
1475
1476 int prio, static_prio, normal_prio;
1477 unsigned int rt_priority;
1478 const struct sched_class *sched_class;
1479 struct sched_entity se;
1480 struct sched_rt_entity rt;
1481#ifdef CONFIG_CGROUP_SCHED
1482 struct task_group *sched_task_group;
1483#endif
1484 struct sched_dl_entity dl;
1485
1486#ifdef CONFIG_PREEMPT_NOTIFIERS
1487
1488 struct hlist_head preempt_notifiers;
1489#endif
1490
1491#ifdef CONFIG_BLK_DEV_IO_TRACE
1492 unsigned int btrace_seq;
1493#endif
1494
1495 unsigned int policy;
1496 int nr_cpus_allowed;
1497 cpumask_t cpus_allowed;
1498
1499#ifdef CONFIG_PREEMPT_RCU
1500 int rcu_read_lock_nesting;
1501 union rcu_special rcu_read_unlock_special;
1502 struct list_head rcu_node_entry;
1503 struct rcu_node *rcu_blocked_node;
1504#endif
1505#ifdef CONFIG_TASKS_RCU
1506 unsigned long rcu_tasks_nvcsw;
1507 bool rcu_tasks_holdout;
1508 struct list_head rcu_tasks_holdout_list;
1509 int rcu_tasks_idle_cpu;
1510#endif
1511
1512#ifdef CONFIG_SCHED_INFO
1513 struct sched_info sched_info;
1514#endif
1515
1516 struct list_head tasks;
1517#ifdef CONFIG_SMP
1518 struct plist_node pushable_tasks;
1519 struct rb_node pushable_dl_tasks;
1520#endif
1521
1522 struct mm_struct *mm, *active_mm;
1523
1524 u32 vmacache_seqnum;
1525 struct vm_area_struct *vmacache[VMACACHE_SIZE];
1526#if defined(SPLIT_RSS_COUNTING)
1527 struct task_rss_stat rss_stat;
1528#endif
1529
1530 int exit_state;
1531 int exit_code, exit_signal;
1532 int pdeath_signal;
1533 unsigned long jobctl;
1534
1535
1536 unsigned int personality;
1537
1538
1539 unsigned sched_reset_on_fork:1;
1540 unsigned sched_contributes_to_load:1;
1541 unsigned sched_migrated:1;
1542 unsigned sched_remote_wakeup:1;
1543 unsigned :0;
1544
1545
1546 unsigned in_execve:1;
1547 unsigned in_iowait:1;
1548#ifdef CONFIG_MEMCG
1549 unsigned memcg_may_oom:1;
1550#ifndef CONFIG_SLOB
1551 unsigned memcg_kmem_skip_account:1;
1552#endif
1553#endif
1554#ifdef CONFIG_COMPAT_BRK
1555 unsigned brk_randomized:1;
1556#endif
1557
1558 unsigned long atomic_flags;
1559
1560 struct restart_block restart_block;
1561
1562 pid_t pid;
1563 pid_t tgid;
1564
1565#ifdef CONFIG_CC_STACKPROTECTOR
1566
1567 unsigned long stack_canary;
1568#endif
1569
1570
1571
1572
1573
1574 struct task_struct __rcu *real_parent;
1575 struct task_struct __rcu *parent;
1576
1577
1578
1579 struct list_head children;
1580 struct list_head sibling;
1581 struct task_struct *group_leader;
1582
1583
1584
1585
1586
1587
1588 struct list_head ptraced;
1589 struct list_head ptrace_entry;
1590
1591
1592 struct pid_link pids[PIDTYPE_MAX];
1593 struct list_head thread_group;
1594 struct list_head thread_node;
1595
1596 struct completion *vfork_done;
1597 int __user *set_child_tid;
1598 int __user *clear_child_tid;
1599
1600 cputime_t utime, stime, utimescaled, stimescaled;
1601 cputime_t gtime;
1602 struct prev_cputime prev_cputime;
1603#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1604 seqcount_t vtime_seqcount;
1605 unsigned long long vtime_snap;
1606 enum {
1607
1608 VTIME_INACTIVE = 0,
1609
1610 VTIME_USER,
1611
1612 VTIME_SYS,
1613 } vtime_snap_whence;
1614#endif
1615
1616#ifdef CONFIG_NO_HZ_FULL
1617 atomic_t tick_dep_mask;
1618#endif
1619 unsigned long nvcsw, nivcsw;
1620 u64 start_time;
1621 u64 real_start_time;
1622
1623 unsigned long min_flt, maj_flt;
1624
1625 struct task_cputime cputime_expires;
1626 struct list_head cpu_timers[3];
1627
1628
1629 const struct cred __rcu *real_cred;
1630
1631 const struct cred __rcu *cred;
1632
1633 char comm[TASK_COMM_LEN];
1634
1635
1636
1637
1638 struct nameidata *nameidata;
1639#ifdef CONFIG_SYSVIPC
1640
1641 struct sysv_sem sysvsem;
1642 struct sysv_shm sysvshm;
1643#endif
1644#ifdef CONFIG_DETECT_HUNG_TASK
1645
1646 unsigned long last_switch_count;
1647#endif
1648
1649 struct fs_struct *fs;
1650
1651 struct files_struct *files;
1652
1653 struct nsproxy *nsproxy;
1654
1655 struct signal_struct *signal;
1656 struct sighand_struct *sighand;
1657
1658 sigset_t blocked, real_blocked;
1659 sigset_t saved_sigmask;
1660 struct sigpending pending;
1661
1662 unsigned long sas_ss_sp;
1663 size_t sas_ss_size;
1664 unsigned sas_ss_flags;
1665
1666 struct callback_head *task_works;
1667
1668 struct audit_context *audit_context;
1669#ifdef CONFIG_AUDITSYSCALL
1670 kuid_t loginuid;
1671 unsigned int sessionid;
1672#endif
1673 struct seccomp seccomp;
1674
1675
1676 u32 parent_exec_id;
1677 u32 self_exec_id;
1678
1679
1680 spinlock_t alloc_lock;
1681
1682
1683 raw_spinlock_t pi_lock;
1684
1685 struct wake_q_node wake_q;
1686
1687#ifdef CONFIG_RT_MUTEXES
1688
1689 struct rb_root pi_waiters;
1690 struct rb_node *pi_waiters_leftmost;
1691
1692 struct rt_mutex_waiter *pi_blocked_on;
1693#endif
1694
1695#ifdef CONFIG_DEBUG_MUTEXES
1696
1697 struct mutex_waiter *blocked_on;
1698#endif
1699#ifdef CONFIG_TRACE_IRQFLAGS
1700 unsigned int irq_events;
1701 unsigned long hardirq_enable_ip;
1702 unsigned long hardirq_disable_ip;
1703 unsigned int hardirq_enable_event;
1704 unsigned int hardirq_disable_event;
1705 int hardirqs_enabled;
1706 int hardirq_context;
1707 unsigned long softirq_disable_ip;
1708 unsigned long softirq_enable_ip;
1709 unsigned int softirq_disable_event;
1710 unsigned int softirq_enable_event;
1711 int softirqs_enabled;
1712 int softirq_context;
1713#endif
1714#ifdef CONFIG_LOCKDEP
1715# define MAX_LOCK_DEPTH 48UL
1716 u64 curr_chain_key;
1717 int lockdep_depth;
1718 unsigned int lockdep_recursion;
1719 struct held_lock held_locks[MAX_LOCK_DEPTH];
1720 gfp_t lockdep_reclaim_gfp;
1721#endif
1722#ifdef CONFIG_UBSAN
1723 unsigned int in_ubsan;
1724#endif
1725
1726
1727 void *journal_info;
1728
1729
1730 struct bio_list *bio_list;
1731
1732#ifdef CONFIG_BLOCK
1733
1734 struct blk_plug *plug;
1735#endif
1736
1737
1738 struct reclaim_state *reclaim_state;
1739
1740 struct backing_dev_info *backing_dev_info;
1741
1742 struct io_context *io_context;
1743
1744 unsigned long ptrace_message;
1745 siginfo_t *last_siginfo;
1746 struct task_io_accounting ioac;
1747#if defined(CONFIG_TASK_XACCT)
1748 u64 acct_rss_mem1;
1749 u64 acct_vm_mem1;
1750 cputime_t acct_timexpd;
1751#endif
1752#ifdef CONFIG_CPUSETS
1753 nodemask_t mems_allowed;
1754 seqcount_t mems_allowed_seq;
1755 int cpuset_mem_spread_rotor;
1756 int cpuset_slab_spread_rotor;
1757#endif
1758#ifdef CONFIG_CGROUPS
1759
1760 struct css_set __rcu *cgroups;
1761
1762 struct list_head cg_list;
1763#endif
1764#ifdef CONFIG_FUTEX
1765 struct robust_list_head __user *robust_list;
1766#ifdef CONFIG_COMPAT
1767 struct compat_robust_list_head __user *compat_robust_list;
1768#endif
1769 struct list_head pi_state_list;
1770 struct futex_pi_state *pi_state_cache;
1771#endif
1772#ifdef CONFIG_PERF_EVENTS
1773 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1774 struct mutex perf_event_mutex;
1775 struct list_head perf_event_list;
1776#endif
1777#ifdef CONFIG_DEBUG_PREEMPT
1778 unsigned long preempt_disable_ip;
1779#endif
1780#ifdef CONFIG_NUMA
1781 struct mempolicy *mempolicy;
1782 short il_next;
1783 short pref_node_fork;
1784#endif
1785#ifdef CONFIG_NUMA_BALANCING
1786 int numa_scan_seq;
1787 unsigned int numa_scan_period;
1788 unsigned int numa_scan_period_max;
1789 int numa_preferred_nid;
1790 unsigned long numa_migrate_retry;
1791 u64 node_stamp;
1792 u64 last_task_numa_placement;
1793 u64 last_sum_exec_runtime;
1794 struct callback_head numa_work;
1795
1796 struct list_head numa_entry;
1797 struct numa_group *numa_group;
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813 unsigned long *numa_faults;
1814 unsigned long total_numa_faults;
1815
1816
1817
1818
1819
1820
1821
1822 unsigned long numa_faults_locality[3];
1823
1824 unsigned long numa_pages_migrated;
1825#endif
1826
1827#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1828 struct tlbflush_unmap_batch tlb_ubc;
1829#endif
1830
1831 struct rcu_head rcu;
1832
1833
1834
1835
1836 struct pipe_inode_info *splice_pipe;
1837
1838 struct page_frag task_frag;
1839
1840#ifdef CONFIG_TASK_DELAY_ACCT
1841 struct task_delay_info *delays;
1842#endif
1843#ifdef CONFIG_FAULT_INJECTION
1844 int make_it_fail;
1845#endif
1846
1847
1848
1849
1850 int nr_dirtied;
1851 int nr_dirtied_pause;
1852 unsigned long dirty_paused_when;
1853
1854#ifdef CONFIG_LATENCYTOP
1855 int latency_record_count;
1856 struct latency_record latency_record[LT_SAVECOUNT];
1857#endif
1858
1859
1860
1861
1862 u64 timer_slack_ns;
1863 u64 default_timer_slack_ns;
1864
1865#ifdef CONFIG_KASAN
1866 unsigned int kasan_depth;
1867#endif
1868#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1869
1870 int curr_ret_stack;
1871
1872 struct ftrace_ret_stack *ret_stack;
1873
1874 unsigned long long ftrace_timestamp;
1875
1876
1877
1878
1879 atomic_t trace_overrun;
1880
1881 atomic_t tracing_graph_pause;
1882#endif
1883#ifdef CONFIG_TRACING
1884
1885 unsigned long trace;
1886
1887 unsigned long trace_recursion;
1888#endif
1889#ifdef CONFIG_KCOV
1890
1891 enum kcov_mode kcov_mode;
1892
1893 unsigned kcov_size;
1894
1895 void *kcov_area;
1896
1897 struct kcov *kcov;
1898#endif
1899#ifdef CONFIG_MEMCG
1900 struct mem_cgroup *memcg_in_oom;
1901 gfp_t memcg_oom_gfp_mask;
1902 int memcg_oom_order;
1903
1904
1905 unsigned int memcg_nr_pages_over_high;
1906#endif
1907#ifdef CONFIG_UPROBES
1908 struct uprobe_task *utask;
1909#endif
1910#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1911 unsigned int sequential_io;
1912 unsigned int sequential_io_avg;
1913#endif
1914#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1915 unsigned long task_state_change;
1916#endif
1917 int pagefault_disabled;
1918#ifdef CONFIG_MMU
1919 struct task_struct *oom_reaper_list;
1920#endif
1921
1922 struct thread_struct thread;
1923
1924
1925
1926
1927
1928
1929};
1930
1931#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1932extern int arch_task_struct_size __read_mostly;
1933#else
1934# define arch_task_struct_size (sizeof(struct task_struct))
1935#endif
1936
1937
1938#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1939
1940static inline int tsk_nr_cpus_allowed(struct task_struct *p)
1941{
1942 return p->nr_cpus_allowed;
1943}
1944
1945#define TNF_MIGRATED 0x01
1946#define TNF_NO_GROUP 0x02
1947#define TNF_SHARED 0x04
1948#define TNF_FAULT_LOCAL 0x08
1949#define TNF_MIGRATE_FAIL 0x10
1950
1951#ifdef CONFIG_NUMA_BALANCING
1952extern void task_numa_fault(int last_node, int node, int pages, int flags);
1953extern pid_t task_numa_group_id(struct task_struct *p);
1954extern void set_numabalancing_state(bool enabled);
1955extern void task_numa_free(struct task_struct *p);
1956extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1957 int src_nid, int dst_cpu);
1958#else
1959static inline void task_numa_fault(int last_node, int node, int pages,
1960 int flags)
1961{
1962}
1963static inline pid_t task_numa_group_id(struct task_struct *p)
1964{
1965 return 0;
1966}
1967static inline void set_numabalancing_state(bool enabled)
1968{
1969}
1970static inline void task_numa_free(struct task_struct *p)
1971{
1972}
1973static inline bool should_numa_migrate_memory(struct task_struct *p,
1974 struct page *page, int src_nid, int dst_cpu)
1975{
1976 return true;
1977}
1978#endif
1979
1980static inline struct pid *task_pid(struct task_struct *task)
1981{
1982 return task->pids[PIDTYPE_PID].pid;
1983}
1984
1985static inline struct pid *task_tgid(struct task_struct *task)
1986{
1987 return task->group_leader->pids[PIDTYPE_PID].pid;
1988}
1989
1990
1991
1992
1993
1994
1995static inline struct pid *task_pgrp(struct task_struct *task)
1996{
1997 return task->group_leader->pids[PIDTYPE_PGID].pid;
1998}
1999
2000static inline struct pid *task_session(struct task_struct *task)
2001{
2002 return task->group_leader->pids[PIDTYPE_SID].pid;
2003}
2004
2005struct pid_namespace;
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
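/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */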
2020pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
2021 struct pid_namespace *ns);
2022
2023static inline pid_t task_pid_nr(struct task_struct *tsk)
2024{
2025 return tsk->pid;
2026}
2027
2028static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
2029 struct pid_namespace *ns)
2030{
2031 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
2032}
2033
2034static inline pid_t task_pid_vnr(struct task_struct *tsk)
2035{
2036 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
2037}
2038
2039
2040static inline pid_t task_tgid_nr(struct task_struct *tsk)
2041{
2042 return tsk->tgid;
2043}
2044
2045pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
2046
2047static inline pid_t task_tgid_vnr(struct task_struct *tsk)
2048{
2049 return pid_vnr(task_tgid(tsk));
2050}
2051
2052
2053static inline int pid_alive(const struct task_struct *p);
2054static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
2055{
2056 pid_t pid = 0;
2057
2058 rcu_read_lock();
2059 if (pid_alive(tsk))
2060 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
2061 rcu_read_unlock();
2062
2063 return pid;
2064}
2065
2066static inline pid_t task_ppid_nr(const struct task_struct *tsk)
2067{
2068 return task_ppid_nr_ns(tsk, &init_pid_ns);
2069}
2070
2071static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
2072 struct pid_namespace *ns)
2073{
2074 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
2075}
2076
2077static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
2078{
2079 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
2080}
2081
2082
2083static inline pid_t task_session_nr_ns(struct task_struct *tsk,
2084 struct pid_namespace *ns)
2085{
2086 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
2087}
2088
2089static inline pid_t task_session_vnr(struct task_struct *tsk)
2090{
2091 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
2092}
2093
2094
2095static inline pid_t task_pgrp_nr(struct task_struct *tsk)
2096{
2097 return task_pgrp_nr_ns(tsk, &init_pid_ns);
2098}
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
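/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure can be stale
 * and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */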
2110static inline int pid_alive(const struct task_struct *p)
2111{
2112 return p->pids[PIDTYPE_PID].pid != NULL;
2113}
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
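/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */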
2124static inline int is_global_init(struct task_struct *tsk)
2125{
2126 return task_tgid_nr(tsk) == 1;
2127}
2128
2129extern struct pid *cad_pid;
2130
2131extern void free_task(struct task_struct *tsk);
2132#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
2133
2134extern void __put_task_struct(struct task_struct *t);
2135
2136static inline void put_task_struct(struct task_struct *t)
2137{
2138 if (atomic_dec_and_test(&t->usage))
2139 __put_task_struct(t);
2140}
2141
2142#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2143extern void task_cputime(struct task_struct *t,
2144 cputime_t *utime, cputime_t *stime);
2145extern void task_cputime_scaled(struct task_struct *t,
2146 cputime_t *utimescaled, cputime_t *stimescaled);
2147extern cputime_t task_gtime(struct task_struct *t);
2148#else
2149static inline void task_cputime(struct task_struct *t,
2150 cputime_t *utime, cputime_t *stime)
2151{
2152 if (utime)
2153 *utime = t->utime;
2154 if (stime)
2155 *stime = t->stime;
2156}
2157
2158static inline void task_cputime_scaled(struct task_struct *t,
2159 cputime_t *utimescaled,
2160 cputime_t *stimescaled)
2161{
2162 if (utimescaled)
2163 *utimescaled = t->utimescaled;
2164 if (stimescaled)
2165 *stimescaled = t->stimescaled;
2166}
2167
2168static inline cputime_t task_gtime(struct task_struct *t)
2169{
2170 return t->gtime;
2171}
2172#endif
2173extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2174extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2175
2176
2177
2178
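/*
 * Per process flags
 */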
2179#define PF_EXITING 0x00000004
2180#define PF_EXITPIDONE 0x00000008
2181#define PF_VCPU 0x00000010
2182#define PF_WQ_WORKER 0x00000020
2183#define PF_FORKNOEXEC 0x00000040
2184#define PF_MCE_PROCESS 0x00000080
2185#define PF_SUPERPRIV 0x00000100
2186#define PF_DUMPCORE 0x00000200
2187#define PF_SIGNALED 0x00000400
2188#define PF_MEMALLOC 0x00000800
2189#define PF_NPROC_EXCEEDED 0x00001000
2190#define PF_USED_MATH 0x00002000
2191#define PF_USED_ASYNC 0x00004000
2192#define PF_NOFREEZE 0x00008000
2193#define PF_FROZEN 0x00010000
2194#define PF_FSTRANS 0x00020000
2195#define PF_KSWAPD 0x00040000
2196#define PF_MEMALLOC_NOIO 0x00080000
2197#define PF_LESS_THROTTLE 0x00100000
2198#define PF_KTHREAD 0x00200000
2199#define PF_RANDOMIZE 0x00400000
2200#define PF_SWAPWRITE 0x00800000
2201#define PF_NO_SETAFFINITY 0x04000000
2202#define PF_MCE_EARLY 0x08000000
2203#define PF_MUTEX_TESTER 0x20000000
2204#define PF_FREEZER_SKIP 0x40000000
2205#define PF_SUSPEND_TASK 0x80000000
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2219#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2220#define clear_used_math() clear_stopped_child_used_math(current)
2221#define set_used_math() set_stopped_child_used_math(current)
2222#define conditional_stopped_child_used_math(condition, child) \
2223 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2224#define conditional_used_math(condition) \
2225 conditional_stopped_child_used_math(condition, current)
2226#define copy_to_stopped_child_used_math(child) \
2227 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2228
2229#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2230#define used_math() tsk_used_math(current)
2231
2232
2233
2234
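/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
 * __GFP_FS is also cleared as it implies __GFP_IO.
 */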
2235static inline gfp_t memalloc_noio_flags(gfp_t flags)
2236{
2237 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2238 flags &= ~(__GFP_IO | __GFP_FS);
2239 return flags;
2240}
2241
2242static inline unsigned int memalloc_noio_save(void)
2243{
2244 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2245 current->flags |= PF_MEMALLOC_NOIO;
2246 return flags;
2247}
2248
2249static inline void memalloc_noio_restore(unsigned int flags)
2250{
2251 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2252}
2253
2254
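/* Per-process atomic flags. */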
2255#define PFA_NO_NEW_PRIVS 0
2256#define PFA_SPREAD_PAGE 1
2257#define PFA_SPREAD_SLAB 2
2258#define PFA_LMK_WAITING 3
2259
2260
2261#define TASK_PFA_TEST(name, func) \
2262 static inline bool task_##func(struct task_struct *p) \
2263 { return test_bit(PFA_##name, &p->atomic_flags); }
2264#define TASK_PFA_SET(name, func) \
2265 static inline void task_set_##func(struct task_struct *p) \
2266 { set_bit(PFA_##name, &p->atomic_flags); }
2267#define TASK_PFA_CLEAR(name, func) \
2268 static inline void task_clear_##func(struct task_struct *p) \
2269 { clear_bit(PFA_##name, &p->atomic_flags); }
2270
2271TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2272TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2273
2274TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2275TASK_PFA_SET(SPREAD_PAGE, spread_page)
2276TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2277
2278TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2279TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2280TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2281
2282TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
2283TASK_PFA_SET(LMK_WAITING, lmk_waiting)
2284
2285
2286
2287
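/*
 * task->jobctl flags
 */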
2288#define JOBCTL_STOP_SIGMASK 0xffff
2289
2290#define JOBCTL_STOP_DEQUEUED_BIT 16
2291#define JOBCTL_STOP_PENDING_BIT 17
2292#define JOBCTL_STOP_CONSUME_BIT 18
2293#define JOBCTL_TRAP_STOP_BIT 19
2294#define JOBCTL_TRAP_NOTIFY_BIT 20
2295#define JOBCTL_TRAPPING_BIT 21
2296#define JOBCTL_LISTENING_BIT 22
2297
2298#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
2299#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
2300#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
2301#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
2302#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
2303#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
2304#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
2305
2306#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2307#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2308
2309extern bool task_set_jobctl_pending(struct task_struct *task,
2310 unsigned long mask);
2311extern void task_clear_jobctl_trapping(struct task_struct *task);
2312extern void task_clear_jobctl_pending(struct task_struct *task,
2313 unsigned long mask);
2314
2315static inline void rcu_copy_process(struct task_struct *p)
2316{
2317#ifdef CONFIG_PREEMPT_RCU
2318 p->rcu_read_lock_nesting = 0;
2319 p->rcu_read_unlock_special.s = 0;
2320 p->rcu_blocked_node = NULL;
2321 INIT_LIST_HEAD(&p->rcu_node_entry);
2322#endif
2323#ifdef CONFIG_TASKS_RCU
2324 p->rcu_tasks_holdout = false;
2325 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2326 p->rcu_tasks_idle_cpu = -1;
2327#endif
2328}
2329
2330static inline void tsk_restore_flags(struct task_struct *task,
2331 unsigned long orig_flags, unsigned long flags)
2332{
2333 task->flags &= ~flags;
2334 task->flags |= orig_flags & flags;
2335}
2336
2337extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2338 const struct cpumask *trial);
2339extern int task_can_attach(struct task_struct *p,
2340 const struct cpumask *cs_cpus_allowed);
2341#ifdef CONFIG_SMP
2342extern void do_set_cpus_allowed(struct task_struct *p,
2343 const struct cpumask *new_mask);
2344
2345extern int set_cpus_allowed_ptr(struct task_struct *p,
2346 const struct cpumask *new_mask);
2347#else
2348static inline void do_set_cpus_allowed(struct task_struct *p,
2349 const struct cpumask *new_mask)
2350{
2351}
2352static inline int set_cpus_allowed_ptr(struct task_struct *p,
2353 const struct cpumask *new_mask)
2354{
2355 if (!cpumask_test_cpu(0, new_mask))
2356 return -EINVAL;
2357 return 0;
2358}
2359#endif
2360
2361#ifdef CONFIG_NO_HZ_COMMON
2362void calc_load_enter_idle(void);
2363void calc_load_exit_idle(void);
2364#else
2365static inline void calc_load_enter_idle(void) { }
2366static inline void calc_load_exit_idle(void) { }
2367#endif
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377extern unsigned long long notrace sched_clock(void);
2378
2379
2380
2381extern u64 running_clock(void);
2382extern u64 sched_clock_cpu(int cpu);
2383
2384
2385extern void sched_clock_init(void);
2386
2387#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2388static inline void sched_clock_tick(void)
2389{
2390}
2391
2392static inline void sched_clock_idle_sleep_event(void)
2393{
2394}
2395
2396static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2397{
2398}
2399
2400static inline u64 cpu_clock(int cpu)
2401{
2402 return sched_clock();
2403}
2404
2405static inline u64 local_clock(void)
2406{
2407 return sched_clock();
2408}
2409#else
2410
2411
2412
2413
2414
2415
2416extern int sched_clock_stable(void);
2417extern void set_sched_clock_stable(void);
2418extern void clear_sched_clock_stable(void);
2419
2420extern void sched_clock_tick(void);
2421extern void sched_clock_idle_sleep_event(void);
2422extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434static inline u64 cpu_clock(int cpu)
2435{
2436 return sched_clock_cpu(cpu);
2437}
2438
2439static inline u64 local_clock(void)
2440{
2441 return sched_clock_cpu(raw_smp_processor_id());
2442}
2443#endif
2444
2445#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2446
2447
2448
2449
2450
2451extern void enable_sched_clock_irqtime(void);
2452extern void disable_sched_clock_irqtime(void);
2453#else
2454static inline void enable_sched_clock_irqtime(void) {}
2455static inline void disable_sched_clock_irqtime(void) {}
2456#endif
2457
2458extern unsigned long long
2459task_sched_runtime(struct task_struct *task);
2460
2461
2462#ifdef CONFIG_SMP
2463extern void sched_exec(void);
2464#else
2465#define sched_exec() {}
2466#endif
2467
2468extern void sched_clock_idle_sleep_event(void);
2469extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2470
2471#ifdef CONFIG_HOTPLUG_CPU
2472extern void idle_task_exit(void);
2473#else
2474static inline void idle_task_exit(void) {}
2475#endif
2476
2477#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2478extern void wake_up_nohz_cpu(int cpu);
2479#else
2480static inline void wake_up_nohz_cpu(int cpu) { }
2481#endif
2482
2483#ifdef CONFIG_NO_HZ_FULL
2484extern u64 scheduler_tick_max_deferment(void);
2485#endif
2486
2487#ifdef CONFIG_SCHED_AUTOGROUP
2488extern void sched_autogroup_create_attach(struct task_struct *p);
2489extern void sched_autogroup_detach(struct task_struct *p);
2490extern void sched_autogroup_fork(struct signal_struct *sig);
2491extern void sched_autogroup_exit(struct signal_struct *sig);
2492#ifdef CONFIG_PROC_FS
2493extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2494extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2495#endif
2496#else
2497static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2498static inline void sched_autogroup_detach(struct task_struct *p) { }
2499static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2500static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2501#endif
2502
2503extern int yield_to(struct task_struct *p, bool preempt);
2504extern void set_user_nice(struct task_struct *p, long nice);
2505extern int task_prio(const struct task_struct *p);
2506
2507
2508
2509
2510
2511
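/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */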
2512static inline int task_nice(const struct task_struct *p)
2513{
2514 return PRIO_TO_NICE((p)->static_prio);
2515}
2516extern int can_nice(const struct task_struct *p, const int nice);
2517extern int task_curr(const struct task_struct *p);
2518extern int idle_cpu(int cpu);
2519extern int sched_setscheduler(struct task_struct *, int,
2520 const struct sched_param *);
2521extern int sched_setscheduler_nocheck(struct task_struct *, int,
2522 const struct sched_param *);
2523extern int sched_setattr(struct task_struct *,
2524 const struct sched_attr *);
2525extern struct task_struct *idle_task(int cpu);
2526
2527
2528
2529
2530
2531
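/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */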
2532static inline bool is_idle_task(const struct task_struct *p)
2533{
2534 return p->pid == 0;
2535}
2536extern struct task_struct *curr_task(int cpu);
2537extern void set_curr_task(int cpu, struct task_struct *p);
2538
2539void yield(void);
2540
2541union thread_union {
2542 struct thread_info thread_info;
2543 unsigned long stack[THREAD_SIZE/sizeof(long)];
2544};
2545
2546#ifndef __HAVE_ARCH_KSTACK_END
2547static inline int kstack_end(void *addr)
2548{
2549
2550
2551
2552 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2553}
2554#endif
2555
2556extern union thread_union init_thread_union;
2557extern struct task_struct init_task;
2558
2559extern struct mm_struct init_mm;
2560
2561extern struct pid_namespace init_pid_ns;
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
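/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */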
2574extern struct task_struct *find_task_by_vpid(pid_t nr);
2575extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2576 struct pid_namespace *ns);
2577
2578
2579extern struct user_struct * alloc_uid(kuid_t);
2580static inline struct user_struct *get_uid(struct user_struct *u)
2581{
2582 atomic_inc(&u->__count);
2583 return u;
2584}
2585extern void free_uid(struct user_struct *);
2586
2587#include <asm/current.h>
2588
2589extern void xtime_update(unsigned long ticks);
2590
2591extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2592extern int wake_up_process(struct task_struct *tsk);
2593extern void wake_up_new_task(struct task_struct *tsk);
2594#ifdef CONFIG_SMP
2595 extern void kick_process(struct task_struct *tsk);
2596#else
2597 static inline void kick_process(struct task_struct *tsk) { }
2598#endif
2599extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2600extern void sched_dead(struct task_struct *p);
2601
2602extern void proc_caches_init(void);
2603extern void flush_signals(struct task_struct *);
2604extern void ignore_signals(struct task_struct *);
2605extern void flush_signal_handlers(struct task_struct *, int force_default);
2606extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2607
2608static inline int kernel_dequeue_signal(siginfo_t *info)
2609{
2610 struct task_struct *tsk = current;
2611 siginfo_t __info;
2612 int ret;
2613
2614 spin_lock_irq(&tsk->sighand->siglock);
2615 ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
2616 spin_unlock_irq(&tsk->sighand->siglock);
2617
2618 return ret;
2619}
2620
2621static inline void kernel_signal_stop(void)
2622{
2623 spin_lock_irq(&current->sighand->siglock);
2624 if (current->jobctl & JOBCTL_STOP_DEQUEUED)
2625 __set_current_state(TASK_STOPPED);
2626 spin_unlock_irq(&current->sighand->siglock);
2627
2628 schedule();
2629}
2630
2631extern void release_task(struct task_struct * p);
2632extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2633extern int force_sigsegv(int, struct task_struct *);
2634extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2635extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2636extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2637extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2638 const struct cred *, u32);
2639extern int kill_pgrp(struct pid *pid, int sig, int priv);
2640extern int kill_pid(struct pid *pid, int sig, int priv);
2641extern int kill_proc_info(int, struct siginfo *, pid_t);
2642extern __must_check bool do_notify_parent(struct task_struct *, int);
2643extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2644extern void force_sig(int, struct task_struct *);
2645extern int send_sig(int, struct task_struct *, int);
2646extern int zap_other_threads(struct task_struct *p);
2647extern struct sigqueue *sigqueue_alloc(void);
2648extern void sigqueue_free(struct sigqueue *);
2649extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2650extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2651
2652static inline void restore_saved_sigmask(void)
2653{
2654 if (test_and_clear_restore_sigmask())
2655 __set_current_blocked(&current->saved_sigmask);
2656}
2657
2658static inline sigset_t *sigmask_to_save(void)
2659{
2660 sigset_t *res = &current->blocked;
2661 if (unlikely(test_restore_sigmask()))
2662 res = &current->saved_sigmask;
2663 return res;
2664}
2665
2666static inline int kill_cad_pid(int sig, int priv)
2667{
2668 return kill_pid(cad_pid, sig, priv);
2669}
2670
2671/* These can be the second arg to send_sig_info/send_group_sig_info. */
2672#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2673#define SEND_SIG_PRIV ((struct siginfo *) 1)
2674#define SEND_SIG_FORCED ((struct siginfo *) 2)
2675
2676/*
2677 * True if we are on the alternate signal stack.
2678 */
2679static inline int on_sig_stack(unsigned long sp)
2680{
2681 /*
2682 * If the signal stack is SS_AUTODISARM then, by construction, we
2683 * can't be on the signal stack unless user code deliberately set
2684 * SS_AUTODISARM when we were already on it.
2685 *
2686 * This improves reliability: if user state gets corrupted such that
2687 * the stack pointer points very close to the end of the signal stack,
2688 * then this check will enable the signal to be handled anyway.
2689 */
2690 if (current->sas_ss_flags & SS_AUTODISARM)
2691 return 0;
2692
2693#ifdef CONFIG_STACK_GROWSUP
2694 return sp >= current->sas_ss_sp &&
2695 sp - current->sas_ss_sp < current->sas_ss_size;
2696#else
2697 return sp > current->sas_ss_sp &&
2698 sp - current->sas_ss_sp <= current->sas_ss_size;
2699#endif
2700}
2701
2702static inline int sas_ss_flags(unsigned long sp)
2703{
2704 if (!current->sas_ss_size)
2705 return SS_DISABLE;
2706
2707 return on_sig_stack(sp) ? SS_ONSTACK : 0;
2708}
2709
2710static inline void sas_ss_reset(struct task_struct *p)
2711{
2712 p->sas_ss_sp = 0;
2713 p->sas_ss_size = 0;
2714 p->sas_ss_flags = SS_DISABLE;
2715}
2716
2717static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2718{
2719 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2720#ifdef CONFIG_STACK_GROWSUP
2721 return current->sas_ss_sp;
2722#else
2723 return current->sas_ss_sp + current->sas_ss_size;
2724#endif
2725 return sp;
2726}
2727
2728/*
2729 * Routines for handling mm_structs
2730 */
2731extern struct mm_struct * mm_alloc(void);
2732
2733/* mmdrop drops the mm and the page tables */
2734extern void __mmdrop(struct mm_struct *);
2735static inline void mmdrop(struct mm_struct *mm)
2736{
2737 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2738 __mmdrop(mm);
2739}
2740
2741static inline bool mmget_not_zero(struct mm_struct *mm)
2742{
2743 return atomic_inc_not_zero(&mm->mm_users);
2744}
2745
2746/* mmput gets rid of the mappings and all user-space */
2747extern void mmput(struct mm_struct *);
2748#ifdef CONFIG_MMU
2749/* same as above but performs the slow path from the async context. Can
2750 * be called from the atomic context as well
2751 */
2752extern void mmput_async(struct mm_struct *);
2753#endif
2754
2755/* Grab a reference to a task's mm, if it is not already going away */
2756extern struct mm_struct *get_task_mm(struct task_struct *task);
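/*
 * Illustrative sketch, not part of this header: get_task_mm() takes a
 * reference on the mm (or returns NULL for kernel threads and exiting
 * tasks), so every successful call must be paired with mmput().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		// ... operate on mm ...
 *		mmput(mm);
 *	}
 */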
2757
2758/*
2759 * Grab a reference to a task's mm, if it is not already going away
2760 * and ptrace_may_access() with the mode parameter passed to it succeeds.
2761 */
2762extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2763/* Remove the current task's stale references to the old mm_struct */
2764extern void mm_release(struct task_struct *, struct mm_struct *);
2765
2766#ifdef CONFIG_HAVE_COPY_THREAD_TLS
2767extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
2768 struct task_struct *, unsigned long);
2769#else
2770extern int copy_thread(unsigned long, unsigned long, unsigned long,
2771 struct task_struct *);
2772
2773/* Architectures that haven't opted into copy_thread_tls get the tls argument
2774 * via pt_regs, so ignore the tls argument passed via C. */
2775static inline int copy_thread_tls(
2776 unsigned long clone_flags, unsigned long sp, unsigned long arg,
2777 struct task_struct *p, unsigned long tls)
2778{
2779 return copy_thread(clone_flags, sp, arg, p);
2780}
2781#endif
2782extern void flush_thread(void);
2783
2784#ifdef CONFIG_HAVE_EXIT_THREAD
2785extern void exit_thread(struct task_struct *tsk);
2786#else
2787static inline void exit_thread(struct task_struct *tsk)
2788{
2789}
2790#endif
2791
2792extern void exit_files(struct task_struct *);
2793extern void __cleanup_sighand(struct sighand_struct *);
2794
2795extern void exit_itimers(struct signal_struct *);
2796extern void flush_itimer_signals(void);
2797
2798extern void do_group_exit(int);
2799
2800extern int do_execve(struct filename *,
2801 const char __user * const __user *,
2802 const char __user * const __user *);
2803extern int do_execveat(int, struct filename *,
2804 const char __user * const __user *,
2805 const char __user * const __user *,
2806 int);
2807extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
2808extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2809struct task_struct *fork_idle(int);
2810extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2811
2812extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2813static inline void set_task_comm(struct task_struct *tsk, const char *from)
2814{
2815 __set_task_comm(tsk, from, false);
2816}
2817extern char *get_task_comm(char *to, struct task_struct *tsk);
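/*
 * Illustrative sketch, not part of this header: get_task_comm() expects
 * a destination buffer of at least TASK_COMM_LEN bytes.
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 */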
2818
2819#ifdef CONFIG_SMP
2820void scheduler_ipi(void);
2821extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2822#else
2823static inline void scheduler_ipi(void) { }
2824static inline unsigned long wait_task_inactive(struct task_struct *p,
2825 long match_state)
2826{
2827 return 1;
2828}
2829#endif
2830
2831#define tasklist_empty() \
2832 list_empty(&init_task.tasks)
2833
2834#define next_task(p) \
2835 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2836
2837#define for_each_process(p) \
2838 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2839
2840extern bool current_is_single_threaded(void);
2841
2842/*
2843 * Careful: do_each_thread/while_each_thread is a double loop so
2844 * 'break' will not work as expected - use goto instead.
2845 */
2846#define do_each_thread(g, t) \
2847 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2848
2849#define while_each_thread(g, t) \
2850 while ((t = next_thread(t)) != g)
2851
2852#define __for_each_thread(signal, t) \
2853 list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2854
2855#define for_each_thread(p, t) \
2856 __for_each_thread((p)->signal, t)
2857
2858/* Careful: this is a double loop, 'break' won't work as expected. */
2859#define for_each_process_thread(p, t) \
2860 for_each_process(p) for_each_thread(p, t)
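/*
 * Illustrative sketch, not part of this header: the task list is
 * RCU-protected, so a read-only walk over every thread can be done
 * under rcu_read_lock() (holding tasklist_lock works as well).
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		inspect_thread(t);	// hypothetical helper
 *	rcu_read_unlock();
 */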
2861
2862static inline int get_nr_threads(struct task_struct *tsk)
2863{
2864 return tsk->signal->nr_threads;
2865}
2866
2867static inline bool thread_group_leader(struct task_struct *p)
2868{
2869 return p->exit_signal >= 0;
2870}
2871
2872
2873/*
2874 * True if task_pid(p) is the PID recorded as the thread-group leader
2875 * pid (p->signal->leader_pid); unlike thread_group_leader() this does
2876 * not rely on p->exit_signal.
2877 */
2878static inline bool has_group_leader_pid(struct task_struct *p)
2879{
2880 return task_pid(p) == p->signal->leader_pid;
2881}
2882
2883static inline
2884bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2885{
2886 return p1->signal == p2->signal;
2887}
2888
2889static inline struct task_struct *next_thread(const struct task_struct *p)
2890{
2891 return list_entry_rcu(p->thread_group.next,
2892 struct task_struct, thread_group);
2893}
2894
2895static inline int thread_group_empty(struct task_struct *p)
2896{
2897 return list_empty(&p->thread_group);
2898}
2899
2900#define delay_group_leader(p) \
2901 (thread_group_leader(p) && !thread_group_empty(p))
2902
2903/*
2904 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2905 * subscriptions and synchronises with wait4(). Also used in procfs. Also
2906 * pins the final release of task.io_context. Also protects ->cpuset and
2907 * ->cgroup.subsys[]. And ->vfork_done.
2908 *
2909 * Nests both inside and outside of read_lock(&tasklist_lock).
2910 * It must not be nested with write_lock_irq(&tasklist_lock),
2911 * neither inside nor outside.
2912 */
2913static inline void task_lock(struct task_struct *p)
2914{
2915 spin_lock(&p->alloc_lock);
2916}
2917
2918static inline void task_unlock(struct task_struct *p)
2919{
2920 spin_unlock(&p->alloc_lock);
2921}
2922
2923extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2924 unsigned long *flags);
2925
2926static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2927 unsigned long *flags)
2928{
2929 struct sighand_struct *ret;
2930
2931 ret = __lock_task_sighand(tsk, flags);
2932 (void)__cond_lock(&tsk->sighand->siglock, ret);
2933 return ret;
2934}
2935
2936static inline void unlock_task_sighand(struct task_struct *tsk,
2937 unsigned long *flags)
2938{
2939 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2940}
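/*
 * Illustrative sketch, not part of this header: lock_task_sighand()
 * returns NULL once the task has no sighand left (it is being released),
 * so the result must be checked before touching signal state.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		// tsk->signal and tsk->sighand are stable here
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */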
2941
2942/**
2943 * threadgroup_change_begin - mark the beginning of changes to a threadgroup
2944 * @tsk: task causing the changes
2945 *
2946 * All operations which modify a threadgroup - a new thread joining the
2947 * group, death of a member thread (the assertion of PF_EXITING) and
2948 * exec(2) dethreading the process and replacing the leader - are wrapped
2949 * by threadgroup_change_{begin|end}() to provide exclusion for subsystems
2950 * which need to take care of threadgroup changes atomically.
2951 * threadgroup_change_begin() can sleep.
2952 */
2953static inline void threadgroup_change_begin(struct task_struct *tsk)
2954{
2955 might_sleep();
2956 cgroup_threadgroup_change_begin(tsk);
2957}
2958
2959/**
2960 * threadgroup_change_end - mark the end of changes to a threadgroup
2961 * @tsk: task causing the changes
2962 *
2963 * See threadgroup_change_begin().
2964 */
2965static inline void threadgroup_change_end(struct task_struct *tsk)
2966{
2967 cgroup_threadgroup_change_end(tsk);
2968}
2969
2970#ifndef __HAVE_THREAD_FUNCTIONS
2971
2972#define task_thread_info(task) ((struct thread_info *)(task)->stack)
2973#define task_stack_page(task) ((task)->stack)
2974
2975static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2976{
2977 *task_thread_info(p) = *task_thread_info(org);
2978 task_thread_info(p)->task = p;
2979}
2980
2981/*
2982 * Return the address of the last usable long on the stack.
2983 *
2984 * When the stack grows down, this is just above the thread
2985 * info struct. Going down beyond this is the thread info struct.
2986 *
2987 * When the stack grows up, this is the highest address.
2988 * Beyond that position, we corrupt data on the next page.
2989 */
2990static inline unsigned long *end_of_stack(struct task_struct *p)
2991{
2992#ifdef CONFIG_STACK_GROWSUP
2993 return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
2994#else
2995 return (unsigned long *)(task_thread_info(p) + 1);
2996#endif
2997}
2998
2999#endif
3000#define task_stack_end_corrupted(task) \
3001 (*(end_of_stack(task)) != STACK_END_MAGIC)
3002
3003static inline int object_is_on_stack(void *obj)
3004{
3005 void *stack = task_stack_page(current);
3006
3007 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
3008}
3009
3010extern void thread_stack_cache_init(void);
3011
3012#ifdef CONFIG_DEBUG_STACK_USAGE
3013static inline unsigned long stack_not_used(struct task_struct *p)
3014{
3015 unsigned long *n = end_of_stack(p);
3016
3017 do {
3018# ifdef CONFIG_STACK_GROWSUP
3019 n--;
3020# else
3021 n++;
3022# endif
3023 } while (!*n);
3024
3025# ifdef CONFIG_STACK_GROWSUP
3026 return (unsigned long)end_of_stack(p) - (unsigned long)n;
3027# else
3028 return (unsigned long)n - (unsigned long)end_of_stack(p);
3029# endif
3030}
3031#endif
3032extern void set_task_stack_end_magic(struct task_struct *tsk);
3033
3034/* set thread flags in other task's structures
3035 * - see asm/thread_info.h for TIF_xxxx flags available
3036 */
3037static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
3038{
3039 set_ti_thread_flag(task_thread_info(tsk), flag);
3040}
3041
3042static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
3043{
3044 clear_ti_thread_flag(task_thread_info(tsk), flag);
3045}
3046
3047static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
3048{
3049 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
3050}
3051
3052static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
3053{
3054 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
3055}
3056
3057static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
3058{
3059 return test_ti_thread_flag(task_thread_info(tsk), flag);
3060}
3061
3062static inline void set_tsk_need_resched(struct task_struct *tsk)
3063{
3064 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
3065}
3066
3067static inline void clear_tsk_need_resched(struct task_struct *tsk)
3068{
3069 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
3070}
3071
3072static inline int test_tsk_need_resched(struct task_struct *tsk)
3073{
3074 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
3075}
3076
3077static inline int restart_syscall(void)
3078{
3079 set_tsk_thread_flag(current, TIF_SIGPENDING);
3080 return -ERESTARTNOINTR;
3081}
3082
3083static inline int signal_pending(struct task_struct *p)
3084{
3085 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
3086}
3087
3088static inline int __fatal_signal_pending(struct task_struct *p)
3089{
3090 return unlikely(sigismember(&p->pending.signal, SIGKILL));
3091}
3092
3093static inline int fatal_signal_pending(struct task_struct *p)
3094{
3095 return signal_pending(p) && __fatal_signal_pending(p);
3096}
3097
3098static inline int signal_pending_state(long state, struct task_struct *p)
3099{
3100 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
3101 return 0;
3102 if (!signal_pending(p))
3103 return 0;
3104
3105 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
3106}
3107
3108/*
3109 * cond_resched() and cond_resched_lock(): latency reduction via
3110 * explicit rescheduling in places that are safe. The return
3111 * value indicates whether a reschedule was done in fact.
3112 * cond_resched_lock() will drop the spinlock before scheduling
3113 * and reacquire it afterwards.
3114 */
3115extern int _cond_resched(void);
3116
3117#define cond_resched() ({ \
3118 ___might_sleep(__FILE__, __LINE__, 0); \
3119 _cond_resched(); \
3120})
3121
3122extern int __cond_resched_lock(spinlock_t *lock);
3123
3124#define cond_resched_lock(lock) ({ \
3125 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
3126 __cond_resched_lock(lock); \
3127})
3128
3129extern int __cond_resched_softirq(void);
3130
3131#define cond_resched_softirq() ({ \
3132 ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
3133 __cond_resched_softirq(); \
3134})
3135
3136static inline void cond_resched_rcu(void)
3137{
3138#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
3139 rcu_read_unlock();
3140 cond_resched();
3141 rcu_read_lock();
3142#endif
3143}
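/*
 * Illustrative sketch, not part of this header: long-running loops in
 * process context call cond_resched() so they do not monopolise the CPU
 * on non-preemptible kernels.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);	// hypothetical helper
 *		cond_resched();
 *	}
 */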
3144
3145/*
3146 * Does a critical section need to be broken due to another
3147 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
3148 * but a general need for low latency)
3149 */
3150static inline int spin_needbreak(spinlock_t *lock)
3151{
3152#ifdef CONFIG_PREEMPT
3153 return spin_is_contended(lock);
3154#else
3155 return 0;
3156#endif
3157}
3158
3159/*
3160 * Idle thread specific functions to determine the need_resched
3161 * polling state.
3162 */
3163#ifdef TIF_POLLING_NRFLAG
3164static inline int tsk_is_polling(struct task_struct *p)
3165{
3166 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
3167}
3168
3169static inline void __current_set_polling(void)
3170{
3171 set_thread_flag(TIF_POLLING_NRFLAG);
3172}
3173
3174static inline bool __must_check current_set_polling_and_test(void)
3175{
3176 __current_set_polling();
3177
3178 /*
3179 * Polling state must be visible before we test NEED_RESCHED,
3180 * paired by resched_curr().
3181 */
3182 smp_mb__after_atomic();
3183
3184 return unlikely(tif_need_resched());
3185}
3186
3187static inline void __current_clr_polling(void)
3188{
3189 clear_thread_flag(TIF_POLLING_NRFLAG);
3190}
3191
3192static inline bool __must_check current_clr_polling_and_test(void)
3193{
3194 __current_clr_polling();
3195
3196 /*
3197 * Polling state must be visible before we test NEED_RESCHED,
3198 * paired by resched_curr().
3199 */
3200 smp_mb__after_atomic();
3201
3202 return unlikely(tif_need_resched());
3203}
3204
3205#else
3206static inline int tsk_is_polling(struct task_struct *p) { return 0; }
3207static inline void __current_set_polling(void) { }
3208static inline void __current_clr_polling(void) { }
3209
3210static inline bool __must_check current_set_polling_and_test(void)
3211{
3212 return unlikely(tif_need_resched());
3213}
3214static inline bool __must_check current_clr_polling_and_test(void)
3215{
3216 return unlikely(tif_need_resched());
3217}
3218#endif
3219
3220static inline void current_clr_polling(void)
3221{
3222 __current_clr_polling();
3223
3224 /*
3225 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit;
3226 * once the bit is cleared we will get IPIs with every new
3227 * TIF_NEED_RESCHED, and the IPI handler, scheduler_ipi(), will also
3228 * fold it for us.
3229 */
3230 smp_mb();
3231
3232 preempt_fold_need_resched();
3233}
3234
3235static __always_inline bool need_resched(void)
3236{
3237 return unlikely(tif_need_resched());
3238}
3239
3240/*
3241 * Thread group CPU time accounting.
3242 */
3243void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
3244void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
3245
3246
3247/*
3248 * Reevaluate whether the task has signals pending delivery.
3249 * This is required every time the blocked sigset_t changes.
3250 * Callers must hold sighand->siglock.
3251 */
3252extern void recalc_sigpending_and_wake(struct task_struct *t);
3253extern void recalc_sigpending(void);
3254
3255extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
3256
3257static inline void signal_wake_up(struct task_struct *t, bool resume)
3258{
3259 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
3260}
3261static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
3262{
3263 signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
3264}
3265
3266/*
3267 * Wrappers for p->thread_info->cpu access. No-op on UP.
3268 */
3269#ifdef CONFIG_SMP
3270
3271static inline unsigned int task_cpu(const struct task_struct *p)
3272{
3273 return task_thread_info(p)->cpu;
3274}
3275
3276static inline int task_node(const struct task_struct *p)
3277{
3278 return cpu_to_node(task_cpu(p));
3279}
3280
3281extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
3282
3283#else
3284
3285static inline unsigned int task_cpu(const struct task_struct *p)
3286{
3287 return 0;
3288}
3289
3290static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
3291{
3292}
3293
3294#endif
3295
3296extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3297extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
3298
3299#ifdef CONFIG_CGROUP_SCHED
3300extern struct task_group root_task_group;
3301#endif
3302
3303extern int task_can_switch_user(struct user_struct *up,
3304 struct task_struct *tsk);
3305
3306#ifdef CONFIG_TASK_XACCT
3307static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3308{
3309 tsk->ioac.rchar += amt;
3310}
3311
3312static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3313{
3314 tsk->ioac.wchar += amt;
3315}
3316
3317static inline void inc_syscr(struct task_struct *tsk)
3318{
3319 tsk->ioac.syscr++;
3320}
3321
3322static inline void inc_syscw(struct task_struct *tsk)
3323{
3324 tsk->ioac.syscw++;
3325}
3326#else
3327static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3328{
3329}
3330
3331static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3332{
3333}
3334
3335static inline void inc_syscr(struct task_struct *tsk)
3336{
3337}
3338
3339static inline void inc_syscw(struct task_struct *tsk)
3340{
3341}
3342#endif
3343
3344#ifndef TASK_SIZE_OF
3345#define TASK_SIZE_OF(tsk) TASK_SIZE
3346#endif
3347
3348#ifdef CONFIG_MEMCG
3349extern void mm_update_next_owner(struct mm_struct *mm);
3350#else
3351static inline void mm_update_next_owner(struct mm_struct *mm)
3352{
3353}
3354#endif
3355
3356static inline unsigned long task_rlimit(const struct task_struct *tsk,
3357 unsigned int limit)
3358{
3359 return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
3360}
3361
3362static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3363 unsigned int limit)
3364{
3365 return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
3366}
3367
3368static inline unsigned long rlimit(unsigned int limit)
3369{
3370 return task_rlimit(current, limit);
3371}
3372
3373static inline unsigned long rlimit_max(unsigned int limit)
3374{
3375 return task_rlimit_max(current, limit);
3376}
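/*
 * Illustrative sketch, not part of this header: checking a request
 * against the current task's soft limit.
 *
 *	if (len > rlimit(RLIMIT_STACK))
 *		return -ENOMEM;
 */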
3377
3378#ifdef CONFIG_CPU_FREQ
3379struct update_util_data {
3380 void (*func)(struct update_util_data *data,
3381 u64 time, unsigned long util, unsigned long max);
3382};
3383
3384void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
3385 void (*func)(struct update_util_data *data, u64 time,
3386 unsigned long util, unsigned long max));
3387void cpufreq_remove_update_util_hook(int cpu);
3388#endif
3389
3390#endif
3391