1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5
6#include <linux/sched/prio.h>
7
8
9struct sched_param {
10 int sched_priority;
11};
12
13#include <asm/param.h>
14
15#include <linux/capability.h>
16#include <linux/threads.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/timex.h>
20#include <linux/jiffies.h>
21#include <linux/plist.h>
22#include <linux/rbtree.h>
23#include <linux/thread_info.h>
24#include <linux/cpumask.h>
25#include <linux/errno.h>
26#include <linux/nodemask.h>
27#include <linux/mm_types.h>
28#include <linux/preempt.h>
29
30#include <asm/page.h>
31#include <asm/ptrace.h>
32#include <linux/cputime.h>
33
34#include <linux/smp.h>
35#include <linux/sem.h>
36#include <linux/shm.h>
37#include <linux/signal.h>
38#include <linux/compiler.h>
39#include <linux/completion.h>
40#include <linux/pid.h>
41#include <linux/percpu.h>
42#include <linux/topology.h>
43#include <linux/seccomp.h>
44#include <linux/rcupdate.h>
45#include <linux/rculist.h>
46#include <linux/rtmutex.h>
47
48#include <linux/time.h>
49#include <linux/param.h>
50#include <linux/resource.h>
51#include <linux/timer.h>
52#include <linux/hrtimer.h>
53#include <linux/kcov.h>
54#include <linux/task_io_accounting.h>
55#include <linux/latencytop.h>
56#include <linux/cred.h>
57#include <linux/llist.h>
58#include <linux/uidgid.h>
59#include <linux/gfp.h>
60#include <linux/magic.h>
61#include <linux/cgroup-defs.h>
62
63#include <asm/processor.h>
64
65#define SCHED_ATTR_SIZE_VER0 48
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111struct sched_attr {
112 u32 size;
113
114 u32 sched_policy;
115 u64 sched_flags;
116
117
118 s32 sched_nice;
119
120
121 u32 sched_priority;
122
123
124 u64 sched_runtime;
125 u64 sched_deadline;
126 u64 sched_period;
127};
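
/*
 * Illustrative sketch, not part of this header: how a kernel caller might
 * fill struct sched_attr and hand it to sched_setattr() (declared later in
 * this file) to move a task onto SCHED_DEADLINE.  All times are nanoseconds
 * and must satisfy sched_runtime <= sched_deadline <= sched_period.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * NSEC_PER_MSEC,
 *		.sched_deadline	=  30 * NSEC_PER_MSEC,
 *		.sched_period	= 100 * NSEC_PER_MSEC,
 *	};
 *	int ret = sched_setattr(p, &attr);
 *
 * (NSEC_PER_MSEC comes in via <linux/time.h>, already included above.)
 */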
128
129struct futex_pi_state;
130struct robust_list_head;
131struct bio_list;
132struct fs_struct;
133struct perf_event_context;
134struct blk_plug;
135struct filename;
136struct nameidata;
137
138#define VMACACHE_BITS 2
139#define VMACACHE_SIZE (1U << VMACACHE_BITS)
140#define VMACACHE_MASK (VMACACHE_SIZE - 1)
141
142
143
144
145
146
147
148
149
150
151
152extern unsigned long avenrun[];
153extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
154
155#define FSHIFT 11
156#define FIXED_1 (1<<FSHIFT)
157#define LOAD_FREQ (5*HZ+1)
158#define EXP_1 1884
159#define EXP_5 2014
160#define EXP_15 2037
161
162#define CALC_LOAD(load,exp,n) \
163 load *= exp; \
164 load += n*(FIXED_1-exp); \
165 load >>= FSHIFT;
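
/*
 * Illustrative sketch, not part of this header: CALC_LOAD() implements a
 * fixed-point exponentially weighted moving average.  With FSHIFT == 11,
 * FIXED_1 == 2048 and EXP_1 == 1884 (about 2048 / e^(5s/1min)), one 5-second
 * update of the 1-minute load average scales the sampled count of runnable
 * plus uninterruptible tasks by FIXED_1 and folds it in:
 *
 *	unsigned long load = avenrun[0];
 *	unsigned long n = nr_active * FIXED_1;	   (nr_active is a stand-in)
 *
 *	CALC_LOAD(load, EXP_1, n);	   load = (load*EXP_1 + n*(FIXED_1-EXP_1)) >> FSHIFT
 *	avenrun[0] = load;
 *
 * The macro expands to three bare statements (no do { } while (0)), so it
 * cannot be used where a single statement is required.
 */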
166
167extern unsigned long total_forks;
168extern int nr_threads;
169DECLARE_PER_CPU(unsigned long, process_counts);
170extern int nr_processes(void);
171extern unsigned long nr_running(void);
172extern bool single_task_running(void);
173extern unsigned long nr_iowait(void);
174extern unsigned long nr_iowait_cpu(int cpu);
175extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
176
177extern void calc_global_load(unsigned long ticks);
178
179#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
180extern void cpu_load_update_nohz_start(void);
181extern void cpu_load_update_nohz_stop(void);
182#else
183static inline void cpu_load_update_nohz_start(void) { }
184static inline void cpu_load_update_nohz_stop(void) { }
185#endif
186
187extern void dump_cpu_task(int cpu);
188
189struct seq_file;
190struct cfs_rq;
191struct task_group;
192#ifdef CONFIG_SCHED_DEBUG
193extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
194extern void proc_sched_set_task(struct task_struct *p);
195#endif
196
197
198
199
200
201
202
203
204
205
206
207#define TASK_RUNNING 0
208#define TASK_INTERRUPTIBLE 1
209#define TASK_UNINTERRUPTIBLE 2
210#define __TASK_STOPPED 4
211#define __TASK_TRACED 8
212
213#define EXIT_DEAD 16
214#define EXIT_ZOMBIE 32
215#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
216
217#define TASK_DEAD 64
218#define TASK_WAKEKILL 128
219#define TASK_WAKING 256
220#define TASK_PARKED 512
221#define TASK_NOLOAD 1024
222#define TASK_NEW 2048
223#define TASK_STATE_MAX 4096
224
225#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
226
227extern char ___assert_task_state[1 - 2*!!(
228 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
229
230
231#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
232#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
233#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
234
235#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
236
237
238#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
239#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
240
241
242#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
243 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
244 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
245
246#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
247#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
248#define task_is_stopped_or_traced(task) \
249 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
250#define task_contributes_to_load(task) \
251 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
252 (task->flags & PF_FROZEN) == 0 && \
253 (task->state & TASK_NOLOAD) == 0)
254
255#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
256
257#define __set_task_state(tsk, state_value) \
258 do { \
259 (tsk)->task_state_change = _THIS_IP_; \
260 (tsk)->state = (state_value); \
261 } while (0)
262#define set_task_state(tsk, state_value) \
263 do { \
264 (tsk)->task_state_change = _THIS_IP_; \
265 smp_store_mb((tsk)->state, (state_value)); \
266 } while (0)
267
268
269
270
271
272
273
274
275
276
277
278
279#define __set_current_state(state_value) \
280 do { \
281 current->task_state_change = _THIS_IP_; \
282 current->state = (state_value); \
283 } while (0)
284#define set_current_state(state_value) \
285 do { \
286 current->task_state_change = _THIS_IP_; \
287 smp_store_mb(current->state, (state_value)); \
288 } while (0)
289
290#else
291
292#define __set_task_state(tsk, state_value) \
293 do { (tsk)->state = (state_value); } while (0)
294#define set_task_state(tsk, state_value) \
295 smp_store_mb((tsk)->state, (state_value))
296
297
298
299
300
301
302
303
304
305
306
307
308#define __set_current_state(state_value) \
309 do { current->state = (state_value); } while (0)
310#define set_current_state(state_value) \
311 smp_store_mb(current->state, (state_value))
312
313#endif
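
/*
 * Illustrative sketch, not part of this header: the canonical blocking
 * pattern built on these helpers.  set_current_state() uses smp_store_mb(),
 * so the state store is ordered before the condition re-check and pairs with
 * the barrier in the waker's wake_up().  "sleep_cond" is a stand-in for
 * whatever condition the caller is waiting on.
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (sleep_cond)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */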
314
315
316#define TASK_COMM_LEN 16
317
318#include <linux/spinlock.h>
319
320
321
322
323
324
325
326extern rwlock_t tasklist_lock;
327extern spinlock_t mmlist_lock;
328
329struct task_struct;
330
331#ifdef CONFIG_PROVE_RCU
332extern int lockdep_tasklist_lock_is_held(void);
333#endif
334
335extern void sched_init(void);
336extern void sched_init_smp(void);
337extern asmlinkage void schedule_tail(struct task_struct *prev);
338extern void init_idle(struct task_struct *idle, int cpu);
339extern void init_idle_bootup_task(struct task_struct *idle);
340
341extern cpumask_var_t cpu_isolated_map;
342
343extern int runqueue_is_locked(int cpu);
344
345#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
346extern void nohz_balance_enter_idle(int cpu);
347extern void set_cpu_sd_state_idle(void);
348extern int get_nohz_timer_target(void);
349#else
350static inline void nohz_balance_enter_idle(int cpu) { }
351static inline void set_cpu_sd_state_idle(void) { }
352#endif
353
354
355
356
357extern void show_state_filter(unsigned long state_filter);
358
359static inline void show_state(void)
360{
361 show_state_filter(0);
362}
363
364extern void show_regs(struct pt_regs *);
365
366
367
368
369
370
371extern void show_stack(struct task_struct *task, unsigned long *sp);
372
373extern void cpu_init (void);
374extern void trap_init(void);
375extern void update_process_times(int user);
376extern void scheduler_tick(void);
377extern int sched_cpu_starting(unsigned int cpu);
378extern int sched_cpu_activate(unsigned int cpu);
379extern int sched_cpu_deactivate(unsigned int cpu);
380
381#ifdef CONFIG_HOTPLUG_CPU
382extern int sched_cpu_dying(unsigned int cpu);
383#else
384# define sched_cpu_dying NULL
385#endif
386
387extern void sched_show_task(struct task_struct *p);
388
389#ifdef CONFIG_LOCKUP_DETECTOR
390extern void touch_softlockup_watchdog_sched(void);
391extern void touch_softlockup_watchdog(void);
392extern void touch_softlockup_watchdog_sync(void);
393extern void touch_all_softlockup_watchdogs(void);
394extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
395 void __user *buffer,
396 size_t *lenp, loff_t *ppos);
397extern unsigned int softlockup_panic;
398extern unsigned int hardlockup_panic;
399void lockup_detector_init(void);
400#else
401static inline void touch_softlockup_watchdog_sched(void)
402{
403}
404static inline void touch_softlockup_watchdog(void)
405{
406}
407static inline void touch_softlockup_watchdog_sync(void)
408{
409}
410static inline void touch_all_softlockup_watchdogs(void)
411{
412}
413static inline void lockup_detector_init(void)
414{
415}
416#endif
417
418#ifdef CONFIG_DETECT_HUNG_TASK
419void reset_hung_task_detector(void);
420#else
421static inline void reset_hung_task_detector(void)
422{
423}
424#endif
425
426
427#define __sched __attribute__((__section__(".sched.text")))
428
429
430extern char __sched_text_start[], __sched_text_end[];
431
432
433extern int in_sched_functions(unsigned long addr);
434
435#define MAX_SCHEDULE_TIMEOUT LONG_MAX
436extern signed long schedule_timeout(signed long timeout);
437extern signed long schedule_timeout_interruptible(signed long timeout);
438extern signed long schedule_timeout_killable(signed long timeout);
439extern signed long schedule_timeout_uninterruptible(signed long timeout);
440extern signed long schedule_timeout_idle(signed long timeout);
441asmlinkage void schedule(void);
442extern void schedule_preempt_disabled(void);
443
444extern long io_schedule_timeout(long timeout);
445
446static inline void io_schedule(void)
447{
448 io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
449}
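
/*
 * Illustrative sketch, not part of this header: schedule_timeout() expects
 * the task state to have been set first; the _interruptible/_killable/
 * _uninterruptible/_idle wrappers above do that for you.  Sleeping for about
 * one second can therefore be written either as
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_timeout(HZ);
 *
 * or simply as
 *
 *	schedule_timeout_uninterruptible(HZ);
 */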
450
451struct nsproxy;
452struct user_namespace;
453
454#ifdef CONFIG_MMU
455extern void arch_pick_mmap_layout(struct mm_struct *mm);
456extern unsigned long
457arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
458 unsigned long, unsigned long);
459extern unsigned long
460arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
461 unsigned long len, unsigned long pgoff,
462 unsigned long flags);
463#else
464static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
465#endif
466
467#define SUID_DUMP_DISABLE 0
468#define SUID_DUMP_USER 1
469#define SUID_DUMP_ROOT 2
470
471
472
473
474#define MMF_DUMPABLE_BITS 2
475#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
476
477extern void set_dumpable(struct mm_struct *mm, int value);
478
479
480
481
482
483
484static inline int __get_dumpable(unsigned long mm_flags)
485{
486 return mm_flags & MMF_DUMPABLE_MASK;
487}
488
489static inline int get_dumpable(struct mm_struct *mm)
490{
491 return __get_dumpable(mm->flags);
492}
493
494
495#define MMF_DUMP_ANON_PRIVATE 2
496#define MMF_DUMP_ANON_SHARED 3
497#define MMF_DUMP_MAPPED_PRIVATE 4
498#define MMF_DUMP_MAPPED_SHARED 5
499#define MMF_DUMP_ELF_HEADERS 6
500#define MMF_DUMP_HUGETLB_PRIVATE 7
501#define MMF_DUMP_HUGETLB_SHARED 8
502#define MMF_DUMP_DAX_PRIVATE 9
503#define MMF_DUMP_DAX_SHARED 10
504
505#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
506#define MMF_DUMP_FILTER_BITS 9
507#define MMF_DUMP_FILTER_MASK \
508 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
509#define MMF_DUMP_FILTER_DEFAULT \
510 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
511 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
512
513#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
514# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
515#else
516# define MMF_DUMP_MASK_DEFAULT_ELF 0
517#endif
518
519#define MMF_VM_MERGEABLE 16
520#define MMF_VM_HUGEPAGE 17
521#define MMF_EXE_FILE_CHANGED 18
522
523#define MMF_HAS_UPROBES 19
524#define MMF_RECALC_UPROBES 20
525#define MMF_OOM_REAPED 21
526#define MMF_OOM_NOT_REAPABLE 22
527
528#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
529
530struct sighand_struct {
531 atomic_t count;
532 struct k_sigaction action[_NSIG];
533 spinlock_t siglock;
534 wait_queue_head_t signalfd_wqh;
535};
536
537struct pacct_struct {
538 int ac_flag;
539 long ac_exitcode;
540 unsigned long ac_mem;
541 cputime_t ac_utime, ac_stime;
542 unsigned long ac_minflt, ac_majflt;
543};
544
545struct cpu_itimer {
546 cputime_t expires;
547 cputime_t incr;
548 u32 error;
549 u32 incr_error;
550};
551
552
/*
 * struct prev_cputime - snapshot of previously reported user/system cputime
 *
 * Stores the last utime/stime values handed out so that the adjusted cputime
 * reported for a task or thread group can be kept monotonic.
 */
561struct prev_cputime {
562#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
563 cputime_t utime;
564 cputime_t stime;
565 raw_spinlock_t lock;
566#endif
567};
568
569static inline void prev_cputime_init(struct prev_cputime *prev)
570{
571#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
572 prev->utime = prev->stime = 0;
573 raw_spin_lock_init(&prev->lock);
574#endif
575}
576
577
578
/*
 * struct task_cputime - collected CPU time counts
 *
 * Groups utime, stime and sum_exec_runtime together so the three values can
 * be sampled and compared as one unit, e.g. for POSIX CPU timer expiry checks.
 */
587struct task_cputime {
588 cputime_t utime;
589 cputime_t stime;
590 unsigned long long sum_exec_runtime;
591};
592
593
594#define virt_exp utime
595#define prof_exp stime
596#define sched_exp sum_exec_runtime
597
598#define INIT_CPUTIME \
599 (struct task_cputime) { \
600 .utime = 0, \
601 .stime = 0, \
602 .sum_exec_runtime = 0, \
603 }
604
605
606
607
608
609struct task_cputime_atomic {
610 atomic64_t utime;
611 atomic64_t stime;
612 atomic64_t sum_exec_runtime;
613};
614
615#define INIT_CPUTIME_ATOMIC \
616 (struct task_cputime_atomic) { \
617 .utime = ATOMIC64_INIT(0), \
618 .stime = ATOMIC64_INIT(0), \
619 .sum_exec_runtime = ATOMIC64_INIT(0), \
620 }
621
622#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
623
624
625
626
627
628
629
630#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
631
632
633
634
635
636
637
638
639
640
641#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
642
643
644
645
646
647
648
649
650
651
652
653
654struct thread_group_cputimer {
655 struct task_cputime_atomic cputime_atomic;
656 bool running;
657 bool checking_timer;
658};
659
660#include <linux/rwsem.h>
661struct autogroup;
662
663
664
665
666
667
668
669
670struct signal_struct {
671 atomic_t sigcnt;
672 atomic_t live;
673 int nr_threads;
674 atomic_t oom_victims;
675 struct list_head thread_head;
676
677 wait_queue_head_t wait_chldexit;
678
679
680 struct task_struct *curr_target;
681
682
683 struct sigpending shared_pending;
684
685
686 int group_exit_code;
687
688
689
690
691
692 int notify_count;
693 struct task_struct *group_exit_task;
694
695
696 int group_stop_count;
697 unsigned int flags;
698
699
700
701
702
703
704
705
706
707
708 unsigned int is_child_subreaper:1;
709 unsigned int has_child_subreaper:1;
710
711
712 int posix_timer_id;
713 struct list_head posix_timers;
714
715
716 struct hrtimer real_timer;
717 struct pid *leader_pid;
718 ktime_t it_real_incr;
719
720
721
722
723
724
725 struct cpu_itimer it[2];
726
727
728
729
730
731 struct thread_group_cputimer cputimer;
732
733
734 struct task_cputime cputime_expires;
735
736#ifdef CONFIG_NO_HZ_FULL
737 atomic_t tick_dep_mask;
738#endif
739
740 struct list_head cpu_timers[3];
741
742 struct pid *tty_old_pgrp;
743
744
745 int leader;
746
747 struct tty_struct *tty;
748
749#ifdef CONFIG_SCHED_AUTOGROUP
750 struct autogroup *autogroup;
751#endif
752
753
754
755
756
757
758 seqlock_t stats_lock;
759 cputime_t utime, stime, cutime, cstime;
760 cputime_t gtime;
761 cputime_t cgtime;
762 struct prev_cputime prev_cputime;
763 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
764 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
765 unsigned long inblock, oublock, cinblock, coublock;
766 unsigned long maxrss, cmaxrss;
767 struct task_io_accounting ioac;
768
769
770
771
772
773
774
775 unsigned long long sum_sched_runtime;
776
777
778
779
780
781
782
783
784
785
786 struct rlimit rlim[RLIM_NLIMITS];
787
788#ifdef CONFIG_BSD_PROCESS_ACCT
789 struct pacct_struct pacct;
790#endif
791#ifdef CONFIG_TASKSTATS
792 struct taskstats *stats;
793#endif
794#ifdef CONFIG_AUDIT
795 unsigned audit_tty;
796 struct tty_audit_buf *tty_audit_buf;
797#endif
798
799
800
801
802
803 bool oom_flag_origin;
804 short oom_score_adj;
805 short oom_score_adj_min;
806
807
808 struct mutex cred_guard_mutex;
809
810
811};
812
813
814
815
816#define SIGNAL_STOP_STOPPED 0x00000001
817#define SIGNAL_STOP_CONTINUED 0x00000002
818#define SIGNAL_GROUP_EXIT 0x00000004
819#define SIGNAL_GROUP_COREDUMP 0x00000008
820
821
822
823#define SIGNAL_CLD_STOPPED 0x00000010
824#define SIGNAL_CLD_CONTINUED 0x00000020
825#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
826
827#define SIGNAL_UNKILLABLE 0x00000040
828
829
830static inline int signal_group_exit(const struct signal_struct *sig)
831{
832 return (sig->flags & SIGNAL_GROUP_EXIT) ||
833 (sig->group_exit_task != NULL);
834}
835
836
837
838
839struct user_struct {
840 atomic_t __count;
841 atomic_t processes;
842 atomic_t sigpending;
843#ifdef CONFIG_INOTIFY_USER
844 atomic_t inotify_watches;
845 atomic_t inotify_devs;
846#endif
847#ifdef CONFIG_FANOTIFY
848 atomic_t fanotify_listeners;
849#endif
850#ifdef CONFIG_EPOLL
851 atomic_long_t epoll_watches;
852#endif
853#ifdef CONFIG_POSIX_MQUEUE
854
855 unsigned long mq_bytes;
856#endif
857 unsigned long locked_shm;
858 unsigned long unix_inflight;
859 atomic_long_t pipe_bufs;
860
861#ifdef CONFIG_KEYS
862 struct key *uid_keyring;
863 struct key *session_keyring;
864#endif
865
866
867 struct hlist_node uidhash_node;
868 kuid_t uid;
869
870#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
871 atomic_long_t locked_vm;
872#endif
873};
874
875extern int uids_sysfs_init(void);
876
877extern struct user_struct *find_user(kuid_t);
878
879extern struct user_struct root_user;
880#define INIT_USER (&root_user)
881
882
883struct backing_dev_info;
884struct reclaim_state;
885
886#ifdef CONFIG_SCHED_INFO
887struct sched_info {
888
889 unsigned long pcount;
890 unsigned long long run_delay;
891
892
893 unsigned long long last_arrival,
894 last_queued;
895};
896#endif
897
898#ifdef CONFIG_TASK_DELAY_ACCT
899struct task_delay_info {
900 spinlock_t lock;
901 unsigned int flags;
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918 u64 blkio_start;
919 u64 blkio_delay;
920 u64 swapin_delay;
921 u32 blkio_count;
922
923 u32 swapin_count;
924
925
926 u64 freepages_start;
927 u64 freepages_delay;
928 u32 freepages_count;
929};
930#endif
931
932static inline int sched_info_on(void)
933{
934#ifdef CONFIG_SCHEDSTATS
935 return 1;
936#elif defined(CONFIG_TASK_DELAY_ACCT)
937 extern int delayacct_on;
938 return delayacct_on;
939#else
940 return 0;
941#endif
942}
943
944#ifdef CONFIG_SCHEDSTATS
945void force_schedstat_enabled(void);
946#endif
947
948enum cpu_idle_type {
949 CPU_IDLE,
950 CPU_NOT_IDLE,
951 CPU_NEWLY_IDLE,
952 CPU_MAX_IDLE_TYPES
953};
954
955
956
957
958
959
960
961
962# define SCHED_FIXEDPOINT_SHIFT 10
963# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
964
965
966
967
968#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
969#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997struct wake_q_node {
998 struct wake_q_node *next;
999};
1000
1001struct wake_q_head {
1002 struct wake_q_node *first;
1003 struct wake_q_node **lastp;
1004};
1005
1006#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
1007
1008#define WAKE_Q(name) \
1009 struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
1010
1011extern void wake_q_add(struct wake_q_head *head,
1012 struct task_struct *task);
1013extern void wake_up_q(struct wake_q_head *head);
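
/*
 * Illustrative sketch, not part of this header: wake queues let a path
 * collect tasks to wake while holding a lock and then issue the wakeups
 * after the lock is dropped.  "waiters_lock" and "pick_next_waiter()" are
 * stand-ins for the caller's own lock and bookkeeping.
 *
 *	WAKE_Q(wake_q);
 *	struct task_struct *task;
 *
 *	raw_spin_lock(&waiters_lock);
 *	while ((task = pick_next_waiter()) != NULL)
 *		wake_q_add(&wake_q, task);
 *	raw_spin_unlock(&waiters_lock);
 *
 *	wake_up_q(&wake_q);
 */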
1014
1015
1016
1017
1018#ifdef CONFIG_SMP
1019#define SD_LOAD_BALANCE 0x0001
1020#define SD_BALANCE_NEWIDLE 0x0002
1021#define SD_BALANCE_EXEC 0x0004
1022#define SD_BALANCE_FORK 0x0008
1023#define SD_BALANCE_WAKE 0x0010
1024#define SD_WAKE_AFFINE 0x0020
1025#define SD_SHARE_CPUCAPACITY 0x0080
1026#define SD_SHARE_POWERDOMAIN 0x0100
1027#define SD_SHARE_PKG_RESOURCES 0x0200
1028#define SD_SERIALIZE 0x0400
1029#define SD_ASYM_PACKING 0x0800
1030#define SD_PREFER_SIBLING 0x1000
1031#define SD_OVERLAP 0x2000
1032#define SD_NUMA 0x4000
1033
1034#ifdef CONFIG_SCHED_SMT
1035static inline int cpu_smt_flags(void)
1036{
1037 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1038}
1039#endif
1040
1041#ifdef CONFIG_SCHED_MC
1042static inline int cpu_core_flags(void)
1043{
1044 return SD_SHARE_PKG_RESOURCES;
1045}
1046#endif
1047
1048#ifdef CONFIG_NUMA
1049static inline int cpu_numa_flags(void)
1050{
1051 return SD_NUMA;
1052}
1053#endif
1054
1055struct sched_domain_attr {
1056 int relax_domain_level;
1057};
1058
1059#define SD_ATTR_INIT (struct sched_domain_attr) { \
1060 .relax_domain_level = -1, \
1061}
1062
1063extern int sched_domain_level_max;
1064
1065struct sched_group;
1066
1067struct sched_domain {
1068
1069 struct sched_domain *parent;
1070 struct sched_domain *child;
1071 struct sched_group *groups;
1072 unsigned long min_interval;
1073 unsigned long max_interval;
1074 unsigned int busy_factor;
1075 unsigned int imbalance_pct;
1076 unsigned int cache_nice_tries;
1077 unsigned int busy_idx;
1078 unsigned int idle_idx;
1079 unsigned int newidle_idx;
1080 unsigned int wake_idx;
1081 unsigned int forkexec_idx;
1082 unsigned int smt_gain;
1083
1084 int nohz_idle;
1085 int flags;
1086 int level;
1087
1088
1089 unsigned long last_balance;
1090 unsigned int balance_interval;
1091 unsigned int nr_balance_failed;
1092
1093
1094 u64 max_newidle_lb_cost;
1095 unsigned long next_decay_max_lb_cost;
1096
1097#ifdef CONFIG_SCHEDSTATS
1098
1099 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
1100 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
1101 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
1102 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1103 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1104 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1105 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1106 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
1107
1108
1109 unsigned int alb_count;
1110 unsigned int alb_failed;
1111 unsigned int alb_pushed;
1112
1113
1114 unsigned int sbe_count;
1115 unsigned int sbe_balanced;
1116 unsigned int sbe_pushed;
1117
1118
1119 unsigned int sbf_count;
1120 unsigned int sbf_balanced;
1121 unsigned int sbf_pushed;
1122
1123
1124 unsigned int ttwu_wake_remote;
1125 unsigned int ttwu_move_affine;
1126 unsigned int ttwu_move_balance;
1127#endif
1128#ifdef CONFIG_SCHED_DEBUG
1129 char *name;
1130#endif
1131 union {
1132 void *private;
1133 struct rcu_head rcu;
1134 };
1135
1136 unsigned int span_weight;
1137
1138
1139
1140
1141
1142
1143
1144 unsigned long span[0];
1145};
1146
1147static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1148{
1149 return to_cpumask(sd->span);
1150}
1151
1152extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1153 struct sched_domain_attr *dattr_new);
1154
1155
1156cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1157void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1158
1159bool cpus_share_cache(int this_cpu, int that_cpu);
1160
1161typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1162typedef int (*sched_domain_flags_f)(void);
1163
1164#define SDTL_OVERLAP 0x01
1165
1166struct sd_data {
1167 struct sched_domain **__percpu sd;
1168 struct sched_group **__percpu sg;
1169 struct sched_group_capacity **__percpu sgc;
1170};
1171
1172struct sched_domain_topology_level {
1173 sched_domain_mask_f mask;
1174 sched_domain_flags_f sd_flags;
1175 int flags;
1176 int numa_level;
1177 struct sd_data data;
1178#ifdef CONFIG_SCHED_DEBUG
1179 char *name;
1180#endif
1181};
1182
1183extern void set_sched_topology(struct sched_domain_topology_level *tl);
1184extern void wake_up_if_idle(int cpu);
1185
1186#ifdef CONFIG_SCHED_DEBUG
1187# define SD_INIT_NAME(type) .name = #type
1188#else
1189# define SD_INIT_NAME(type)
1190#endif
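
/*
 * Illustrative sketch, not part of this header: an architecture can replace
 * the default topology by passing a NULL-terminated table of these levels to
 * set_sched_topology(), assuming the usual cpu_smt_mask(), cpu_coregroup_mask()
 * and cpu_cpu_mask() helpers are available:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */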
1191
1192#else
1193
1194struct sched_domain_attr;
1195
1196static inline void
1197partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1198 struct sched_domain_attr *dattr_new)
1199{
1200}
1201
1202static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1203{
1204 return true;
1205}
1206
1207#endif
1208
1209
1210struct io_context;
1211
1212
1213#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1214extern void prefetch_stack(struct task_struct *t);
1215#else
1216static inline void prefetch_stack(struct task_struct *t) { }
1217#endif
1218
1219struct audit_context;
1220struct mempolicy;
1221struct pipe_inode_info;
1222struct uts_namespace;
1223
1224struct load_weight {
1225 unsigned long weight;
1226 u32 inv_weight;
1227};
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281struct sched_avg {
1282 u64 last_update_time, load_sum;
1283 u32 util_sum, period_contrib;
1284 unsigned long load_avg, util_avg;
1285};
1286
1287#ifdef CONFIG_SCHEDSTATS
1288struct sched_statistics {
1289 u64 wait_start;
1290 u64 wait_max;
1291 u64 wait_count;
1292 u64 wait_sum;
1293 u64 iowait_count;
1294 u64 iowait_sum;
1295
1296 u64 sleep_start;
1297 u64 sleep_max;
1298 s64 sum_sleep_runtime;
1299
1300 u64 block_start;
1301 u64 block_max;
1302 u64 exec_max;
1303 u64 slice_max;
1304
1305 u64 nr_migrations_cold;
1306 u64 nr_failed_migrations_affine;
1307 u64 nr_failed_migrations_running;
1308 u64 nr_failed_migrations_hot;
1309 u64 nr_forced_migrations;
1310
1311 u64 nr_wakeups;
1312 u64 nr_wakeups_sync;
1313 u64 nr_wakeups_migrate;
1314 u64 nr_wakeups_local;
1315 u64 nr_wakeups_remote;
1316 u64 nr_wakeups_affine;
1317 u64 nr_wakeups_affine_attempts;
1318 u64 nr_wakeups_passive;
1319 u64 nr_wakeups_idle;
1320};
1321#endif
1322
1323struct sched_entity {
1324 struct load_weight load;
1325 struct rb_node run_node;
1326 struct list_head group_node;
1327 unsigned int on_rq;
1328
1329 u64 exec_start;
1330 u64 sum_exec_runtime;
1331 u64 vruntime;
1332 u64 prev_sum_exec_runtime;
1333
1334 u64 nr_migrations;
1335
1336#ifdef CONFIG_SCHEDSTATS
1337 struct sched_statistics statistics;
1338#endif
1339
1340#ifdef CONFIG_FAIR_GROUP_SCHED
1341 int depth;
1342 struct sched_entity *parent;
1343
1344 struct cfs_rq *cfs_rq;
1345
1346 struct cfs_rq *my_q;
1347#endif
1348
1349#ifdef CONFIG_SMP
1350
1351
1352
1353
1354
1355
1356 struct sched_avg avg ____cacheline_aligned_in_smp;
1357#endif
1358};
1359
1360struct sched_rt_entity {
1361 struct list_head run_list;
1362 unsigned long timeout;
1363 unsigned long watchdog_stamp;
1364 unsigned int time_slice;
1365 unsigned short on_rq;
1366 unsigned short on_list;
1367
1368 struct sched_rt_entity *back;
1369#ifdef CONFIG_RT_GROUP_SCHED
1370 struct sched_rt_entity *parent;
1371
1372 struct rt_rq *rt_rq;
1373
1374 struct rt_rq *my_q;
1375#endif
1376};
1377
1378struct sched_dl_entity {
1379 struct rb_node rb_node;
1380
1381
1382
1383
1384
1385
1386 u64 dl_runtime;
1387 u64 dl_deadline;
1388 u64 dl_period;
1389 u64 dl_bw;
1390
1391
1392
1393
1394
1395
1396 s64 runtime;
1397 u64 deadline;
1398 unsigned int flags;
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414 int dl_throttled, dl_boosted, dl_yielded;
1415
1416
1417
1418
1419
1420 struct hrtimer dl_timer;
1421};
1422
1423union rcu_special {
1424 struct {
1425 u8 blocked;
1426 u8 need_qs;
1427 u8 exp_need_qs;
1428 u8 pad;
1429 } b;
1430 u32 s;
1431};
1432struct rcu_node;
1433
1434enum perf_event_task_context {
1435 perf_invalid_context = -1,
1436 perf_hw_context = 0,
1437 perf_sw_context,
1438 perf_nr_task_contexts,
1439};
1440
1441
1442struct tlbflush_unmap_batch {
1443
1444
1445
1446
1447 struct cpumask cpumask;
1448
1449
1450 bool flush_required;
1451
1452
1453
1454
1455
1456
1457 bool writable;
1458};
1459
1460struct task_struct {
1461 volatile long state;
1462 void *stack;
1463 atomic_t usage;
1464 unsigned int flags;
1465 unsigned int ptrace;
1466
1467#ifdef CONFIG_SMP
1468 struct llist_node wake_entry;
1469 int on_cpu;
1470 unsigned int wakee_flips;
1471 unsigned long wakee_flip_decay_ts;
1472 struct task_struct *last_wakee;
1473
1474 int wake_cpu;
1475#endif
1476 int on_rq;
1477
1478 int prio, static_prio, normal_prio;
1479 unsigned int rt_priority;
1480 const struct sched_class *sched_class;
1481 struct sched_entity se;
1482 struct sched_rt_entity rt;
1483#ifdef CONFIG_CGROUP_SCHED
1484 struct task_group *sched_task_group;
1485#endif
1486 struct sched_dl_entity dl;
1487
1488#ifdef CONFIG_PREEMPT_NOTIFIERS
1489
1490 struct hlist_head preempt_notifiers;
1491#endif
1492
1493#ifdef CONFIG_BLK_DEV_IO_TRACE
1494 unsigned int btrace_seq;
1495#endif
1496
1497 unsigned int policy;
1498 int nr_cpus_allowed;
1499 cpumask_t cpus_allowed;
1500
1501#ifdef CONFIG_PREEMPT_RCU
1502 int rcu_read_lock_nesting;
1503 union rcu_special rcu_read_unlock_special;
1504 struct list_head rcu_node_entry;
1505 struct rcu_node *rcu_blocked_node;
1506#endif
1507#ifdef CONFIG_TASKS_RCU
1508 unsigned long rcu_tasks_nvcsw;
1509 bool rcu_tasks_holdout;
1510 struct list_head rcu_tasks_holdout_list;
1511 int rcu_tasks_idle_cpu;
1512#endif
1513
1514#ifdef CONFIG_SCHED_INFO
1515 struct sched_info sched_info;
1516#endif
1517
1518 struct list_head tasks;
1519#ifdef CONFIG_SMP
1520 struct plist_node pushable_tasks;
1521 struct rb_node pushable_dl_tasks;
1522#endif
1523
1524 struct mm_struct *mm, *active_mm;
1525
1526 u32 vmacache_seqnum;
1527 struct vm_area_struct *vmacache[VMACACHE_SIZE];
1528#if defined(SPLIT_RSS_COUNTING)
1529 struct task_rss_stat rss_stat;
1530#endif
1531
1532 int exit_state;
1533 int exit_code, exit_signal;
1534 int pdeath_signal;
1535 unsigned long jobctl;
1536
1537
1538 unsigned int personality;
1539
1540
1541 unsigned sched_reset_on_fork:1;
1542 unsigned sched_contributes_to_load:1;
1543 unsigned sched_migrated:1;
1544 unsigned sched_remote_wakeup:1;
1545 unsigned :0;
1546
1547
1548 unsigned in_execve:1;
1549 unsigned in_iowait:1;
1550#if !defined(TIF_RESTORE_SIGMASK)
1551 unsigned restore_sigmask:1;
1552#endif
1553#ifdef CONFIG_MEMCG
1554 unsigned memcg_may_oom:1;
1555#ifndef CONFIG_SLOB
1556 unsigned memcg_kmem_skip_account:1;
1557#endif
1558#endif
1559#ifdef CONFIG_COMPAT_BRK
1560 unsigned brk_randomized:1;
1561#endif
1562
1563 unsigned long atomic_flags;
1564
1565 struct restart_block restart_block;
1566
1567 pid_t pid;
1568 pid_t tgid;
1569
1570#ifdef CONFIG_CC_STACKPROTECTOR
1571
1572 unsigned long stack_canary;
1573#endif
1574
1575
1576
1577
1578
1579 struct task_struct __rcu *real_parent;
1580 struct task_struct __rcu *parent;
1581
1582
1583
1584 struct list_head children;
1585 struct list_head sibling;
1586 struct task_struct *group_leader;
1587
1588
1589
1590
1591
1592
1593 struct list_head ptraced;
1594 struct list_head ptrace_entry;
1595
1596
1597 struct pid_link pids[PIDTYPE_MAX];
1598 struct list_head thread_group;
1599 struct list_head thread_node;
1600
1601 struct completion *vfork_done;
1602 int __user *set_child_tid;
1603 int __user *clear_child_tid;
1604
1605 cputime_t utime, stime, utimescaled, stimescaled;
1606 cputime_t gtime;
1607 struct prev_cputime prev_cputime;
1608#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1609 seqcount_t vtime_seqcount;
1610 unsigned long long vtime_snap;
1611 enum {
1612
1613 VTIME_INACTIVE = 0,
1614
1615 VTIME_USER,
1616
1617 VTIME_SYS,
1618 } vtime_snap_whence;
1619#endif
1620
1621#ifdef CONFIG_NO_HZ_FULL
1622 atomic_t tick_dep_mask;
1623#endif
1624 unsigned long nvcsw, nivcsw;
1625 u64 start_time;
1626 u64 real_start_time;
1627
1628 unsigned long min_flt, maj_flt;
1629
1630 struct task_cputime cputime_expires;
1631 struct list_head cpu_timers[3];
1632
1633
1634 const struct cred __rcu *real_cred;
1635
1636 const struct cred __rcu *cred;
1637
1638 char comm[TASK_COMM_LEN];
1639
1640
1641
1642
1643 struct nameidata *nameidata;
1644#ifdef CONFIG_SYSVIPC
1645
1646 struct sysv_sem sysvsem;
1647 struct sysv_shm sysvshm;
1648#endif
1649#ifdef CONFIG_DETECT_HUNG_TASK
1650
1651 unsigned long last_switch_count;
1652#endif
1653
1654 struct fs_struct *fs;
1655
1656 struct files_struct *files;
1657
1658 struct nsproxy *nsproxy;
1659
1660 struct signal_struct *signal;
1661 struct sighand_struct *sighand;
1662
1663 sigset_t blocked, real_blocked;
1664 sigset_t saved_sigmask;
1665 struct sigpending pending;
1666
1667 unsigned long sas_ss_sp;
1668 size_t sas_ss_size;
1669 unsigned sas_ss_flags;
1670
1671 struct callback_head *task_works;
1672
1673 struct audit_context *audit_context;
1674#ifdef CONFIG_AUDITSYSCALL
1675 kuid_t loginuid;
1676 unsigned int sessionid;
1677#endif
1678 struct seccomp seccomp;
1679
1680
1681 u32 parent_exec_id;
1682 u32 self_exec_id;
1683
1684
1685 spinlock_t alloc_lock;
1686
1687
1688 raw_spinlock_t pi_lock;
1689
1690 struct wake_q_node wake_q;
1691
1692#ifdef CONFIG_RT_MUTEXES
1693
1694 struct rb_root pi_waiters;
1695 struct rb_node *pi_waiters_leftmost;
1696
1697 struct rt_mutex_waiter *pi_blocked_on;
1698#endif
1699
1700#ifdef CONFIG_DEBUG_MUTEXES
1701
1702 struct mutex_waiter *blocked_on;
1703#endif
1704#ifdef CONFIG_TRACE_IRQFLAGS
1705 unsigned int irq_events;
1706 unsigned long hardirq_enable_ip;
1707 unsigned long hardirq_disable_ip;
1708 unsigned int hardirq_enable_event;
1709 unsigned int hardirq_disable_event;
1710 int hardirqs_enabled;
1711 int hardirq_context;
1712 unsigned long softirq_disable_ip;
1713 unsigned long softirq_enable_ip;
1714 unsigned int softirq_disable_event;
1715 unsigned int softirq_enable_event;
1716 int softirqs_enabled;
1717 int softirq_context;
1718#endif
1719#ifdef CONFIG_LOCKDEP
1720# define MAX_LOCK_DEPTH 48UL
1721 u64 curr_chain_key;
1722 int lockdep_depth;
1723 unsigned int lockdep_recursion;
1724 struct held_lock held_locks[MAX_LOCK_DEPTH];
1725 gfp_t lockdep_reclaim_gfp;
1726#endif
1727#ifdef CONFIG_UBSAN
1728 unsigned int in_ubsan;
1729#endif
1730
1731
1732 void *journal_info;
1733
1734
1735 struct bio_list *bio_list;
1736
1737#ifdef CONFIG_BLOCK
1738
1739 struct blk_plug *plug;
1740#endif
1741
1742
1743 struct reclaim_state *reclaim_state;
1744
1745 struct backing_dev_info *backing_dev_info;
1746
1747 struct io_context *io_context;
1748
1749 unsigned long ptrace_message;
1750 siginfo_t *last_siginfo;
1751 struct task_io_accounting ioac;
1752#if defined(CONFIG_TASK_XACCT)
1753 u64 acct_rss_mem1;
1754 u64 acct_vm_mem1;
1755 cputime_t acct_timexpd;
1756#endif
1757#ifdef CONFIG_CPUSETS
1758 nodemask_t mems_allowed;
1759 seqcount_t mems_allowed_seq;
1760 int cpuset_mem_spread_rotor;
1761 int cpuset_slab_spread_rotor;
1762#endif
1763#ifdef CONFIG_CGROUPS
1764
1765 struct css_set __rcu *cgroups;
1766
1767 struct list_head cg_list;
1768#endif
1769#ifdef CONFIG_FUTEX
1770 struct robust_list_head __user *robust_list;
1771#ifdef CONFIG_COMPAT
1772 struct compat_robust_list_head __user *compat_robust_list;
1773#endif
1774 struct list_head pi_state_list;
1775 struct futex_pi_state *pi_state_cache;
1776#endif
1777#ifdef CONFIG_PERF_EVENTS
1778 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1779 struct mutex perf_event_mutex;
1780 struct list_head perf_event_list;
1781#endif
1782#ifdef CONFIG_DEBUG_PREEMPT
1783 unsigned long preempt_disable_ip;
1784#endif
1785#ifdef CONFIG_NUMA
1786 struct mempolicy *mempolicy;
1787 short il_next;
1788 short pref_node_fork;
1789#endif
1790#ifdef CONFIG_NUMA_BALANCING
1791 int numa_scan_seq;
1792 unsigned int numa_scan_period;
1793 unsigned int numa_scan_period_max;
1794 int numa_preferred_nid;
1795 unsigned long numa_migrate_retry;
1796 u64 node_stamp;
1797 u64 last_task_numa_placement;
1798 u64 last_sum_exec_runtime;
1799 struct callback_head numa_work;
1800
1801 struct list_head numa_entry;
1802 struct numa_group *numa_group;
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818 unsigned long *numa_faults;
1819 unsigned long total_numa_faults;
1820
1821
1822
1823
1824
1825
1826
1827 unsigned long numa_faults_locality[3];
1828
1829 unsigned long numa_pages_migrated;
1830#endif
1831
1832#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1833 struct tlbflush_unmap_batch tlb_ubc;
1834#endif
1835
1836 struct rcu_head rcu;
1837
1838
1839
1840
1841 struct pipe_inode_info *splice_pipe;
1842
1843 struct page_frag task_frag;
1844
1845#ifdef CONFIG_TASK_DELAY_ACCT
1846 struct task_delay_info *delays;
1847#endif
1848#ifdef CONFIG_FAULT_INJECTION
1849 int make_it_fail;
1850#endif
1851
1852
1853
1854
1855 int nr_dirtied;
1856 int nr_dirtied_pause;
1857 unsigned long dirty_paused_when;
1858
1859#ifdef CONFIG_LATENCYTOP
1860 int latency_record_count;
1861 struct latency_record latency_record[LT_SAVECOUNT];
1862#endif
1863
1864
1865
1866
1867 u64 timer_slack_ns;
1868 u64 default_timer_slack_ns;
1869
1870#ifdef CONFIG_KASAN
1871 unsigned int kasan_depth;
1872#endif
1873#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1874
1875 int curr_ret_stack;
1876
1877 struct ftrace_ret_stack *ret_stack;
1878
1879 unsigned long long ftrace_timestamp;
1880
1881
1882
1883
1884 atomic_t trace_overrun;
1885
1886 atomic_t tracing_graph_pause;
1887#endif
1888#ifdef CONFIG_TRACING
1889
1890 unsigned long trace;
1891
1892 unsigned long trace_recursion;
1893#endif
1894#ifdef CONFIG_KCOV
1895
1896 enum kcov_mode kcov_mode;
1897
1898 unsigned kcov_size;
1899
1900 void *kcov_area;
1901
1902 struct kcov *kcov;
1903#endif
1904#ifdef CONFIG_MEMCG
1905 struct mem_cgroup *memcg_in_oom;
1906 gfp_t memcg_oom_gfp_mask;
1907 int memcg_oom_order;
1908
1909
1910 unsigned int memcg_nr_pages_over_high;
1911#endif
1912#ifdef CONFIG_UPROBES
1913 struct uprobe_task *utask;
1914#endif
1915#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1916 unsigned int sequential_io;
1917 unsigned int sequential_io_avg;
1918#endif
1919#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1920 unsigned long task_state_change;
1921#endif
1922 int pagefault_disabled;
1923#ifdef CONFIG_MMU
1924 struct task_struct *oom_reaper_list;
1925#endif
1926
1927 struct thread_struct thread;
1928
1929
1930
1931
1932
1933
1934};
1935
1936#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1937extern int arch_task_struct_size __read_mostly;
1938#else
1939# define arch_task_struct_size (sizeof(struct task_struct))
1940#endif
1941
1942
1943#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1944
1945static inline int tsk_nr_cpus_allowed(struct task_struct *p)
1946{
1947 return p->nr_cpus_allowed;
1948}
1949
1950#define TNF_MIGRATED 0x01
1951#define TNF_NO_GROUP 0x02
1952#define TNF_SHARED 0x04
1953#define TNF_FAULT_LOCAL 0x08
1954#define TNF_MIGRATE_FAIL 0x10
1955
1956static inline bool in_vfork(struct task_struct *tsk)
1957{
1958 bool ret;
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975 rcu_read_lock();
1976 ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
1977 rcu_read_unlock();
1978
1979 return ret;
1980}
1981
1982#ifdef CONFIG_NUMA_BALANCING
1983extern void task_numa_fault(int last_node, int node, int pages, int flags);
1984extern pid_t task_numa_group_id(struct task_struct *p);
1985extern void set_numabalancing_state(bool enabled);
1986extern void task_numa_free(struct task_struct *p);
1987extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1988 int src_nid, int dst_cpu);
1989#else
1990static inline void task_numa_fault(int last_node, int node, int pages,
1991 int flags)
1992{
1993}
1994static inline pid_t task_numa_group_id(struct task_struct *p)
1995{
1996 return 0;
1997}
1998static inline void set_numabalancing_state(bool enabled)
1999{
2000}
2001static inline void task_numa_free(struct task_struct *p)
2002{
2003}
2004static inline bool should_numa_migrate_memory(struct task_struct *p,
2005 struct page *page, int src_nid, int dst_cpu)
2006{
2007 return true;
2008}
2009#endif
2010
2011static inline struct pid *task_pid(struct task_struct *task)
2012{
2013 return task->pids[PIDTYPE_PID].pid;
2014}
2015
2016static inline struct pid *task_tgid(struct task_struct *task)
2017{
2018 return task->group_leader->pids[PIDTYPE_PID].pid;
2019}
2020
2021
2022
2023
2024
2025
2026static inline struct pid *task_pgrp(struct task_struct *task)
2027{
2028 return task->group_leader->pids[PIDTYPE_PGID].pid;
2029}
2030
2031static inline struct pid *task_session(struct task_struct *task)
2032{
2033 return task->group_leader->pids[PIDTYPE_SID].pid;
2034}
2035
2036struct pid_namespace;
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
2052 struct pid_namespace *ns);
2053
2054static inline pid_t task_pid_nr(struct task_struct *tsk)
2055{
2056 return tsk->pid;
2057}
2058
2059static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
2060 struct pid_namespace *ns)
2061{
2062 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
2063}
2064
2065static inline pid_t task_pid_vnr(struct task_struct *tsk)
2066{
2067 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
2068}
2069
2070
2071static inline pid_t task_tgid_nr(struct task_struct *tsk)
2072{
2073 return tsk->tgid;
2074}
2075
2076pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
2077
2078static inline pid_t task_tgid_vnr(struct task_struct *tsk)
2079{
2080 return pid_vnr(task_tgid(tsk));
2081}
2082
2083
2084static inline int pid_alive(const struct task_struct *p);
2085static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
2086{
2087 pid_t pid = 0;
2088
2089 rcu_read_lock();
2090 if (pid_alive(tsk))
2091 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
2092 rcu_read_unlock();
2093
2094 return pid;
2095}
2096
2097static inline pid_t task_ppid_nr(const struct task_struct *tsk)
2098{
2099 return task_ppid_nr_ns(tsk, &init_pid_ns);
2100}
2101
2102static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
2103 struct pid_namespace *ns)
2104{
2105 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
2106}
2107
2108static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
2109{
2110 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
2111}
2112
2113
2114static inline pid_t task_session_nr_ns(struct task_struct *tsk,
2115 struct pid_namespace *ns)
2116{
2117 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
2118}
2119
2120static inline pid_t task_session_vnr(struct task_struct *tsk)
2121{
2122 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
2123}
2124
2125
2126static inline pid_t task_pgrp_nr(struct task_struct *tsk)
2127{
2128 return task_pgrp_nr_ns(tsk, &init_pid_ns);
2129}
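
/*
 * Illustrative sketch, not part of this header: the suffix convention above
 * is: task_xxx_nr() returns the id as seen from the init pid namespace
 * (global), task_xxx_vnr() the id as seen from the current task's pid
 * namespace, and task_xxx_nr_ns() the id as seen from a caller-supplied
 * namespace.  A debug message might therefore print both views:
 *
 *	pr_info("%s: pid %d (global %d)\n",
 *		tsk->comm, task_pid_vnr(tsk), task_pid_nr(tsk));
 */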
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141static inline int pid_alive(const struct task_struct *p)
2142{
2143 return p->pids[PIDTYPE_PID].pid != NULL;
2144}
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155static inline int is_global_init(struct task_struct *tsk)
2156{
2157 return task_tgid_nr(tsk) == 1;
2158}
2159
2160extern struct pid *cad_pid;
2161
2162extern void free_task(struct task_struct *tsk);
2163#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
2164
2165extern void __put_task_struct(struct task_struct *t);
2166
2167static inline void put_task_struct(struct task_struct *t)
2168{
2169 if (atomic_dec_and_test(&t->usage))
2170 __put_task_struct(t);
2171}
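
/*
 * Illustrative sketch, not part of this header: task_struct lifetime follows
 * the usual get/put pattern.  Code that stores a task pointer beyond the
 * context in which it obtained it must hold a reference:
 *
 *	get_task_struct(p);	   takes a reference (atomic_inc of ->usage)
 *	...
 *	put_task_struct(p);	   drops it; the last put frees the task
 */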
2172
2173struct task_struct *task_rcu_dereference(struct task_struct **ptask);
2174struct task_struct *try_get_task_struct(struct task_struct **ptask);
2175
2176#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2177extern void task_cputime(struct task_struct *t,
2178 cputime_t *utime, cputime_t *stime);
2179extern void task_cputime_scaled(struct task_struct *t,
2180 cputime_t *utimescaled, cputime_t *stimescaled);
2181extern cputime_t task_gtime(struct task_struct *t);
2182#else
2183static inline void task_cputime(struct task_struct *t,
2184 cputime_t *utime, cputime_t *stime)
2185{
2186 if (utime)
2187 *utime = t->utime;
2188 if (stime)
2189 *stime = t->stime;
2190}
2191
2192static inline void task_cputime_scaled(struct task_struct *t,
2193 cputime_t *utimescaled,
2194 cputime_t *stimescaled)
2195{
2196 if (utimescaled)
2197 *utimescaled = t->utimescaled;
2198 if (stimescaled)
2199 *stimescaled = t->stimescaled;
2200}
2201
2202static inline cputime_t task_gtime(struct task_struct *t)
2203{
2204 return t->gtime;
2205}
2206#endif
2207extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2208extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2209
2210
2211
2212
2213#define PF_EXITING 0x00000004
2214#define PF_EXITPIDONE 0x00000008
2215#define PF_VCPU 0x00000010
2216#define PF_WQ_WORKER 0x00000020
2217#define PF_FORKNOEXEC 0x00000040
2218#define PF_MCE_PROCESS 0x00000080
2219#define PF_SUPERPRIV 0x00000100
2220#define PF_DUMPCORE 0x00000200
2221#define PF_SIGNALED 0x00000400
2222#define PF_MEMALLOC 0x00000800
2223#define PF_NPROC_EXCEEDED 0x00001000
2224#define PF_USED_MATH 0x00002000
2225#define PF_USED_ASYNC 0x00004000
2226#define PF_NOFREEZE 0x00008000
2227#define PF_FROZEN 0x00010000
2228#define PF_FSTRANS 0x00020000
2229#define PF_KSWAPD 0x00040000
2230#define PF_MEMALLOC_NOIO 0x00080000
2231#define PF_LESS_THROTTLE 0x00100000
2232#define PF_KTHREAD 0x00200000
2233#define PF_RANDOMIZE 0x00400000
2234#define PF_SWAPWRITE 0x00800000
2235#define PF_NO_SETAFFINITY 0x04000000
2236#define PF_MCE_EARLY 0x08000000
2237#define PF_MUTEX_TESTER 0x20000000
2238#define PF_FREEZER_SKIP 0x40000000
2239#define PF_SUSPEND_TASK 0x80000000
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2253#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2254#define clear_used_math() clear_stopped_child_used_math(current)
2255#define set_used_math() set_stopped_child_used_math(current)
2256#define conditional_stopped_child_used_math(condition, child) \
2257 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2258#define conditional_used_math(condition) \
2259 conditional_stopped_child_used_math(condition, current)
2260#define copy_to_stopped_child_used_math(child) \
2261 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2262
2263#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2264#define used_math() tsk_used_math(current)
2265
2266
2267
2268
2269static inline gfp_t memalloc_noio_flags(gfp_t flags)
2270{
2271 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2272 flags &= ~(__GFP_IO | __GFP_FS);
2273 return flags;
2274}
2275
2276static inline unsigned int memalloc_noio_save(void)
2277{
2278 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2279 current->flags |= PF_MEMALLOC_NOIO;
2280 return flags;
2281}
2282
2283static inline void memalloc_noio_restore(unsigned int flags)
2284{
2285 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2286}
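
/*
 * Illustrative sketch, not part of this header: a driver that must not
 * re-enter the I/O path while allocating (e.g. during a block device resume)
 * brackets the region with the save/restore pair; allocator paths that honour
 * PF_MEMALLOC_NOIO apply memalloc_noio_flags() and so drop __GFP_IO/__GFP_FS:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	...
 *	memalloc_noio_restore(noio_flags);
 */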
2287
2288
2289#define PFA_NO_NEW_PRIVS 0
2290#define PFA_SPREAD_PAGE 1
2291#define PFA_SPREAD_SLAB 2
2292#define PFA_LMK_WAITING 3
2293
2294
2295#define TASK_PFA_TEST(name, func) \
2296 static inline bool task_##func(struct task_struct *p) \
2297 { return test_bit(PFA_##name, &p->atomic_flags); }
2298#define TASK_PFA_SET(name, func) \
2299 static inline void task_set_##func(struct task_struct *p) \
2300 { set_bit(PFA_##name, &p->atomic_flags); }
2301#define TASK_PFA_CLEAR(name, func) \
2302 static inline void task_clear_##func(struct task_struct *p) \
2303 { clear_bit(PFA_##name, &p->atomic_flags); }
2304
2305TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2306TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2307
2308TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2309TASK_PFA_SET(SPREAD_PAGE, spread_page)
2310TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2311
2312TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2313TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2314TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2315
2316TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
2317TASK_PFA_SET(LMK_WAITING, lmk_waiting)
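
/*
 * Illustrative sketch, not part of this header: each TASK_PFA_* line above
 * generates a tiny accessor.  For example TASK_PFA_TEST(SPREAD_PAGE,
 * spread_page) expands to:
 *
 *	static inline bool task_spread_page(struct task_struct *p)
 *	{ return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }
 *
 * No _CLEAR accessor is generated for NO_NEW_PRIVS or LMK_WAITING above, so
 * through this interface those flags can only be set, never cleared.
 */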
2318
2319
2320
2321
2322#define JOBCTL_STOP_SIGMASK 0xffff
2323
2324#define JOBCTL_STOP_DEQUEUED_BIT 16
2325#define JOBCTL_STOP_PENDING_BIT 17
2326#define JOBCTL_STOP_CONSUME_BIT 18
2327#define JOBCTL_TRAP_STOP_BIT 19
2328#define JOBCTL_TRAP_NOTIFY_BIT 20
2329#define JOBCTL_TRAPPING_BIT 21
2330#define JOBCTL_LISTENING_BIT 22
2331
2332#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
2333#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
2334#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
2335#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
2336#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
2337#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
2338#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
2339
2340#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2341#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2342
2343extern bool task_set_jobctl_pending(struct task_struct *task,
2344 unsigned long mask);
2345extern void task_clear_jobctl_trapping(struct task_struct *task);
2346extern void task_clear_jobctl_pending(struct task_struct *task,
2347 unsigned long mask);
2348
2349static inline void rcu_copy_process(struct task_struct *p)
2350{
2351#ifdef CONFIG_PREEMPT_RCU
2352 p->rcu_read_lock_nesting = 0;
2353 p->rcu_read_unlock_special.s = 0;
2354 p->rcu_blocked_node = NULL;
2355 INIT_LIST_HEAD(&p->rcu_node_entry);
2356#endif
2357#ifdef CONFIG_TASKS_RCU
2358 p->rcu_tasks_holdout = false;
2359 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2360 p->rcu_tasks_idle_cpu = -1;
2361#endif
2362}
2363
2364static inline void tsk_restore_flags(struct task_struct *task,
2365 unsigned long orig_flags, unsigned long flags)
2366{
2367 task->flags &= ~flags;
2368 task->flags |= orig_flags & flags;
2369}
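
/*
 * Illustrative sketch, not part of this header: tsk_restore_flags() restores
 * only the bits named in 'flags', which is how callers temporarily enter a
 * special allocation mode, e.g.:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */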
2370
2371extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2372 const struct cpumask *trial);
2373extern int task_can_attach(struct task_struct *p,
2374 const struct cpumask *cs_cpus_allowed);
2375#ifdef CONFIG_SMP
2376extern void do_set_cpus_allowed(struct task_struct *p,
2377 const struct cpumask *new_mask);
2378
2379extern int set_cpus_allowed_ptr(struct task_struct *p,
2380 const struct cpumask *new_mask);
2381#else
2382static inline void do_set_cpus_allowed(struct task_struct *p,
2383 const struct cpumask *new_mask)
2384{
2385}
2386static inline int set_cpus_allowed_ptr(struct task_struct *p,
2387 const struct cpumask *new_mask)
2388{
2389 if (!cpumask_test_cpu(0, new_mask))
2390 return -EINVAL;
2391 return 0;
2392}
2393#endif
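
/*
 * Illustrative sketch, not part of this header: pinning a task to a single
 * CPU usually goes through set_cpus_allowed_ptr() with a constant mask such
 * as cpumask_of():
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)))
 *		pr_warn("could not pin %s to CPU %d\n", p->comm, cpu);
 */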
2394
2395#ifdef CONFIG_NO_HZ_COMMON
2396void calc_load_enter_idle(void);
2397void calc_load_exit_idle(void);
2398#else
2399static inline void calc_load_enter_idle(void) { }
2400static inline void calc_load_exit_idle(void) { }
2401#endif
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411extern unsigned long long notrace sched_clock(void);
2412
2413
2414
2415extern u64 running_clock(void);
2416extern u64 sched_clock_cpu(int cpu);
2417
2418
2419extern void sched_clock_init(void);
2420
2421#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2422static inline void sched_clock_tick(void)
2423{
2424}
2425
2426static inline void sched_clock_idle_sleep_event(void)
2427{
2428}
2429
2430static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2431{
2432}
2433
2434static inline u64 cpu_clock(int cpu)
2435{
2436 return sched_clock();
2437}
2438
2439static inline u64 local_clock(void)
2440{
2441 return sched_clock();
2442}
2443#else
2444
2445
2446
2447
2448
2449
2450extern int sched_clock_stable(void);
2451extern void set_sched_clock_stable(void);
2452extern void clear_sched_clock_stable(void);
2453
2454extern void sched_clock_tick(void);
2455extern void sched_clock_idle_sleep_event(void);
2456extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468static inline u64 cpu_clock(int cpu)
2469{
2470 return sched_clock_cpu(cpu);
2471}
2472
2473static inline u64 local_clock(void)
2474{
2475 return sched_clock_cpu(raw_smp_processor_id());
2476}
2477#endif
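
/*
 * Illustrative sketch, not part of this header: local_clock() is the usual
 * way to take cheap, nanosecond-resolution timestamps for statistics that
 * only need to be consistent on the local CPU:
 *
 *	u64 t0 = local_clock();
 *	do_work();			   stand-in for the measured section
 *	delta_ns = local_clock() - t0;
 */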
2478
2479#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2480
2481
2482
2483
2484
2485extern void enable_sched_clock_irqtime(void);
2486extern void disable_sched_clock_irqtime(void);
2487#else
2488static inline void enable_sched_clock_irqtime(void) {}
2489static inline void disable_sched_clock_irqtime(void) {}
2490#endif
2491
2492extern unsigned long long
2493task_sched_runtime(struct task_struct *task);
2494
2495
2496#ifdef CONFIG_SMP
2497extern void sched_exec(void);
2498#else
2499#define sched_exec() {}
2500#endif
2501
2502extern void sched_clock_idle_sleep_event(void);
2503extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2504
2505#ifdef CONFIG_HOTPLUG_CPU
2506extern void idle_task_exit(void);
2507#else
2508static inline void idle_task_exit(void) {}
2509#endif
2510
2511#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2512extern void wake_up_nohz_cpu(int cpu);
2513#else
2514static inline void wake_up_nohz_cpu(int cpu) { }
2515#endif
2516
2517#ifdef CONFIG_NO_HZ_FULL
2518extern u64 scheduler_tick_max_deferment(void);
2519#endif
2520
2521#ifdef CONFIG_SCHED_AUTOGROUP
2522extern void sched_autogroup_create_attach(struct task_struct *p);
2523extern void sched_autogroup_detach(struct task_struct *p);
2524extern void sched_autogroup_fork(struct signal_struct *sig);
2525extern void sched_autogroup_exit(struct signal_struct *sig);
2526#ifdef CONFIG_PROC_FS
2527extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2528extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2529#endif
2530#else
2531static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2532static inline void sched_autogroup_detach(struct task_struct *p) { }
2533static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2534static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2535#endif
2536
2537extern int yield_to(struct task_struct *p, bool preempt);
2538extern void set_user_nice(struct task_struct *p, long nice);
2539extern int task_prio(const struct task_struct *p);
2540
2541
2542
2543
2544
2545
2546static inline int task_nice(const struct task_struct *p)
2547{
2548 return PRIO_TO_NICE((p)->static_prio);
2549}
2550extern int can_nice(const struct task_struct *p, const int nice);
2551extern int task_curr(const struct task_struct *p);
2552extern int idle_cpu(int cpu);
2553extern int sched_setscheduler(struct task_struct *, int,
2554 const struct sched_param *);
2555extern int sched_setscheduler_nocheck(struct task_struct *, int,
2556 const struct sched_param *);
2557extern int sched_setattr(struct task_struct *,
2558 const struct sched_attr *);
2559extern struct task_struct *idle_task(int cpu);
2560
2561
2562
2563
2564
2565
2566static inline bool is_idle_task(const struct task_struct *p)
2567{
2568 return p->pid == 0;
2569}
2570extern struct task_struct *curr_task(int cpu);
2571extern void set_curr_task(int cpu, struct task_struct *p);
2572
2573void yield(void);
2574
2575union thread_union {
2576 struct thread_info thread_info;
2577 unsigned long stack[THREAD_SIZE/sizeof(long)];
2578};
2579
2580#ifndef __HAVE_ARCH_KSTACK_END
2581static inline int kstack_end(void *addr)
2582{
2583
2584
2585
2586 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2587}
2588#endif
2589
2590extern union thread_union init_thread_union;
2591extern struct task_struct init_task;
2592
2593extern struct mm_struct init_mm;
2594
2595extern struct pid_namespace init_pid_ns;
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608extern struct task_struct *find_task_by_vpid(pid_t nr);
2609extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2610 struct pid_namespace *ns);
2611
2612
2613extern struct user_struct * alloc_uid(kuid_t);
2614static inline struct user_struct *get_uid(struct user_struct *u)
2615{
2616 atomic_inc(&u->__count);
2617 return u;
2618}
2619extern void free_uid(struct user_struct *);
2620
2621#include <asm/current.h>
2622
2623extern void xtime_update(unsigned long ticks);
2624
2625extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2626extern int wake_up_process(struct task_struct *tsk);
2627extern void wake_up_new_task(struct task_struct *tsk);
2628#ifdef CONFIG_SMP
2629 extern void kick_process(struct task_struct *tsk);
2630#else
2631 static inline void kick_process(struct task_struct *tsk) { }
2632#endif
2633extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2634extern void sched_dead(struct task_struct *p);
2635
2636extern void proc_caches_init(void);
2637extern void flush_signals(struct task_struct *);
2638extern void ignore_signals(struct task_struct *);
2639extern void flush_signal_handlers(struct task_struct *, int force_default);
2640extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2641
2642static inline int kernel_dequeue_signal(siginfo_t *info)
2643{
2644 struct task_struct *tsk = current;
2645 siginfo_t __info;
2646 int ret;
2647
2648 spin_lock_irq(&tsk->sighand->siglock);
2649 ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
2650 spin_unlock_irq(&tsk->sighand->siglock);
2651
2652 return ret;
2653}
2654
2655static inline void kernel_signal_stop(void)
2656{
2657 spin_lock_irq(&current->sighand->siglock);
2658 if (current->jobctl & JOBCTL_STOP_DEQUEUED)
2659 __set_current_state(TASK_STOPPED);
2660 spin_unlock_irq(&current->sighand->siglock);
2661
2662 schedule();
2663}
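
/*
 * Illustrative sketch (not from this header): a kernel thread that opts
 * in to a signal and consumes it with kernel_dequeue_signal().  The
 * kthread context, allow_signal() and kthread_should_stop() are assumed
 * to be available via linux/signal.h and linux/kthread.h.
 *
 *	allow_signal(SIGTERM);
 *
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current))
 *			kernel_dequeue_signal(NULL);	// eat the signal
 *		// ... do one unit of work ...
 *	}
 */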
2664
2665extern void release_task(struct task_struct * p);
2666extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2667extern int force_sigsegv(int, struct task_struct *);
2668extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2669extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2670extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2671extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2672 const struct cred *, u32);
2673extern int kill_pgrp(struct pid *pid, int sig, int priv);
2674extern int kill_pid(struct pid *pid, int sig, int priv);
2675extern int kill_proc_info(int, struct siginfo *, pid_t);
2676extern __must_check bool do_notify_parent(struct task_struct *, int);
2677extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2678extern void force_sig(int, struct task_struct *);
2679extern int send_sig(int, struct task_struct *, int);
2680extern int zap_other_threads(struct task_struct *p);
2681extern struct sigqueue *sigqueue_alloc(void);
2682extern void sigqueue_free(struct sigqueue *);
2683extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2684extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2685
2686#ifdef TIF_RESTORE_SIGMASK
2687
/*
 * On architectures that define TIF_RESTORE_SIGMASK, the "restore the
 * saved sigmask when returning to user mode" state is kept as a
 * thread_info flag so it can be checked alongside TIF_SIGPENDING in
 * the signal delivery path.  Architectures without the flag use the
 * current->restore_sigmask boolean in the #else branch below instead.
 *
 * set_restore_sigmask() only makes sense while a signal is pending,
 * hence the WARN_ON(!test_thread_flag(TIF_SIGPENDING)).
 */
2703static inline void set_restore_sigmask(void)
2704{
2705 set_thread_flag(TIF_RESTORE_SIGMASK);
2706 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2707}
2708static inline void clear_restore_sigmask(void)
2709{
2710 clear_thread_flag(TIF_RESTORE_SIGMASK);
2711}
2712static inline bool test_restore_sigmask(void)
2713{
2714 return test_thread_flag(TIF_RESTORE_SIGMASK);
2715}
2716static inline bool test_and_clear_restore_sigmask(void)
2717{
2718 return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
2719}
2720
2721#else
2722
2723
2724static inline void set_restore_sigmask(void)
2725{
2726 current->restore_sigmask = true;
2727 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2728}
2729static inline void clear_restore_sigmask(void)
2730{
2731 current->restore_sigmask = false;
2732}
2733static inline bool test_restore_sigmask(void)
2734{
2735 return current->restore_sigmask;
2736}
2737static inline bool test_and_clear_restore_sigmask(void)
2738{
2739 if (!current->restore_sigmask)
2740 return false;
2741 current->restore_sigmask = false;
2742 return true;
2743}
2744#endif
2745
2746static inline void restore_saved_sigmask(void)
2747{
2748 if (test_and_clear_restore_sigmask())
2749 __set_current_blocked(&current->saved_sigmask);
2750}
2751
2752static inline sigset_t *sigmask_to_save(void)
2753{
2754 sigset_t *res = &current->blocked;
2755 if (unlikely(test_restore_sigmask()))
2756 res = &current->saved_sigmask;
2757 return res;
2758}
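
/*
 * Illustrative sketch (condensed from the usual ppoll/pselect pattern;
 * "newmask" and do_the_wait() are assumptions): a syscall installs a
 * temporary signal mask and relies on the restore_sigmask machinery
 * above to put the old mask back only after any pending signal has been
 * delivered with the temporary mask in place.
 *
 *	sigset_t saved = current->blocked;
 *
 *	set_current_blocked(&newmask);
 *	ret = do_the_wait();
 *
 *	if (signal_pending(current)) {
 *		// deliver the signal under the temporary mask; the old
 *		// mask is restored on the way back to user mode
 *		current->saved_sigmask = saved;
 *		set_restore_sigmask();
 *	} else {
 *		set_current_blocked(&saved);
 *	}
 */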
2759
2760static inline int kill_cad_pid(int sig, int priv)
2761{
2762 return kill_pid(cad_pid, sig, priv);
2763}
2764
2765
2766#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2767#define SEND_SIG_PRIV ((struct siginfo *) 1)
2768#define SEND_SIG_FORCED ((struct siginfo *) 2)
2769
2770
/*
 * True if we are on the alternate signal stack.
 */
2773static inline int on_sig_stack(unsigned long sp)
2774{
	/*
	 * If the alternate stack was set up with SS_AUTODISARM, its
	 * settings are cleared on signal entry, so while that flag is
	 * set the task cannot currently be running on the alternate
	 * stack: report it as not in use.
	 */
2784 if (current->sas_ss_flags & SS_AUTODISARM)
2785 return 0;
2786
2787#ifdef CONFIG_STACK_GROWSUP
2788 return sp >= current->sas_ss_sp &&
2789 sp - current->sas_ss_sp < current->sas_ss_size;
2790#else
2791 return sp > current->sas_ss_sp &&
2792 sp - current->sas_ss_sp <= current->sas_ss_size;
2793#endif
2794}
2795
2796static inline int sas_ss_flags(unsigned long sp)
2797{
2798 if (!current->sas_ss_size)
2799 return SS_DISABLE;
2800
2801 return on_sig_stack(sp) ? SS_ONSTACK : 0;
2802}
2803
2804static inline void sas_ss_reset(struct task_struct *p)
2805{
2806 p->sas_ss_sp = 0;
2807 p->sas_ss_size = 0;
2808 p->sas_ss_flags = SS_DISABLE;
2809}
2810
2811static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2812{
2813 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2814#ifdef CONFIG_STACK_GROWSUP
2815 return current->sas_ss_sp;
2816#else
2817 return current->sas_ss_sp + current->sas_ss_size;
2818#endif
2819 return sp;
2820}
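
/*
 * Illustrative sketch (loosely modelled on architecture signal-frame
 * setup; rt_sigframe, user_stack_pointer() and the 16-byte alignment
 * are assumptions): sigsp() picks the stack on which to build the
 * signal frame - the alternate stack if SA_ONSTACK applies and it is
 * not already in use, otherwise the interrupted stack pointer.
 *
 *	unsigned long sp = sigsp(user_stack_pointer(regs), ksig);
 *
 *	frame = (struct rt_sigframe __user *)((sp - sizeof(*frame)) & ~15UL);
 */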
2821
2822
2823
2824
2825extern struct mm_struct * mm_alloc(void);
2826
2827
2828extern void __mmdrop(struct mm_struct *);
2829static inline void mmdrop(struct mm_struct *mm)
2830{
2831 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2832 __mmdrop(mm);
2833}
2834
2835static inline bool mmget_not_zero(struct mm_struct *mm)
2836{
2837 return atomic_inc_not_zero(&mm->mm_users);
2838}
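
/*
 * Illustrative sketch (not from this header): mm->mm_users counts users
 * of the address space (pinned by mmget_not_zero()/get_task_mm(),
 * dropped by mmput()), while mm->mm_count pins struct mm_struct itself
 * (dropped by mmdrop()).  A typical "peek at another task's mm" pattern:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		// ... walk mm->mmap ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */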
2839
2840
2841extern void mmput(struct mm_struct *);
2842#ifdef CONFIG_MMU
2843
2844
2845
2846extern void mmput_async(struct mm_struct *);
2847#endif
2848
2849
2850extern struct mm_struct *get_task_mm(struct task_struct *task);
2851
2852
2853
2854
2855
2856extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2857
2858extern void mm_release(struct task_struct *, struct mm_struct *);
2859
2860#ifdef CONFIG_HAVE_COPY_THREAD_TLS
2861extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
2862 struct task_struct *, unsigned long);
2863#else
2864extern int copy_thread(unsigned long, unsigned long, unsigned long,
2865 struct task_struct *);
2866
/*
 * Architectures that do not select CONFIG_HAVE_COPY_THREAD_TLS pick up
 * the new TLS pointer from the clone() register layout instead, so the
 * tls argument passed in C is simply ignored here.
 */
2869static inline int copy_thread_tls(
2870 unsigned long clone_flags, unsigned long sp, unsigned long arg,
2871 struct task_struct *p, unsigned long tls)
2872{
2873 return copy_thread(clone_flags, sp, arg, p);
2874}
2875#endif
2876extern void flush_thread(void);
2877
2878#ifdef CONFIG_HAVE_EXIT_THREAD
2879extern void exit_thread(struct task_struct *tsk);
2880#else
2881static inline void exit_thread(struct task_struct *tsk)
2882{
2883}
2884#endif
2885
2886extern void exit_files(struct task_struct *);
2887extern void __cleanup_sighand(struct sighand_struct *);
2888
2889extern void exit_itimers(struct signal_struct *);
2890extern void flush_itimer_signals(void);
2891
2892extern void do_group_exit(int);
2893
2894extern int do_execve(struct filename *,
2895 const char __user * const __user *,
2896 const char __user * const __user *);
2897extern int do_execveat(int, struct filename *,
2898 const char __user * const __user *,
2899 const char __user * const __user *,
2900 int);
2901extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
2902extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2903struct task_struct *fork_idle(int);
2904extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2905
2906extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2907static inline void set_task_comm(struct task_struct *tsk, const char *from)
2908{
2909 __set_task_comm(tsk, from, false);
2910}
2911extern char *get_task_comm(char *to, struct task_struct *tsk);
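
/*
 * Illustrative sketch (not from this header): get_task_comm() expects a
 * buffer of at least TASK_COMM_LEN bytes and returns it NUL-terminated.
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, task);
 *	pr_info("running on behalf of %s (pid %d)\n", comm, task->pid);
 */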
2912
2913#ifdef CONFIG_SMP
2914void scheduler_ipi(void);
2915extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2916#else
2917static inline void scheduler_ipi(void) { }
2918static inline unsigned long wait_task_inactive(struct task_struct *p,
2919 long match_state)
2920{
2921 return 1;
2922}
2923#endif
2924
2925#define tasklist_empty() \
2926 list_empty(&init_task.tasks)
2927
2928#define next_task(p) \
2929 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2930
2931#define for_each_process(p) \
2932 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2933
2934extern bool current_is_single_threaded(void);
2935
/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
2940#define do_each_thread(g, t) \
2941 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2942
2943#define while_each_thread(g, t) \
2944 while ((t = next_thread(t)) != g)
2945
2946#define __for_each_thread(signal, t) \
2947 list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2948
2949#define for_each_thread(p, t) \
2950 __for_each_thread((p)->signal, t)
2951
2952
2953#define for_each_process_thread(p, t) \
2954 for_each_process(p) for_each_thread(p, t)
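
/*
 * Illustrative sketch (not from this header): the iteration macros above
 * walk RCU-protected lists, so a read-side walk needs rcu_read_lock()
 * (or tasklist_lock) held around it.
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		pr_info("%d:%d %s\n", p->pid, t->pid, t->comm);
 *	rcu_read_unlock();
 */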
2955
2956static inline int get_nr_threads(struct task_struct *tsk)
2957{
2958 return tsk->signal->nr_threads;
2959}
2960
2961static inline bool thread_group_leader(struct task_struct *p)
2962{
2963 return p->exit_signal >= 0;
2964}
2965
/*
 * Because of de_thread() during exec, a task can end up owning the pid
 * of the thread group leader without actually being
 * thread_group_leader(); this helper tests the leader pid itself rather
 * than the role.
 */
2972static inline bool has_group_leader_pid(struct task_struct *p)
2973{
2974 return task_pid(p) == p->signal->leader_pid;
2975}
2976
2977static inline
2978bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2979{
2980 return p1->signal == p2->signal;
2981}
2982
2983static inline struct task_struct *next_thread(const struct task_struct *p)
2984{
2985 return list_entry_rcu(p->thread_group.next,
2986 struct task_struct, thread_group);
2987}
2988
2989static inline int thread_group_empty(struct task_struct *p)
2990{
2991 return list_empty(&p->thread_group);
2992}
2993
2994#define delay_group_leader(p) \
2995 (thread_group_leader(p) && !thread_group_empty(p))
2996
2997
/*
 * task_lock() protects per-task pointers such as ->fs, ->files, ->mm,
 * ->group_info and ->comm via ->alloc_lock.
 *
 * It nests both inside and outside of read_lock(&tasklist_lock), but
 * must not be nested with write_lock_irq(&tasklist_lock).
 */
3007static inline void task_lock(struct task_struct *p)
3008{
3009 spin_lock(&p->alloc_lock);
3010}
3011
3012static inline void task_unlock(struct task_struct *p)
3013{
3014 spin_unlock(&p->alloc_lock);
3015}
3016
3017extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
3018 unsigned long *flags);
3019
3020static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
3021 unsigned long *flags)
3022{
3023 struct sighand_struct *ret;
3024
3025 ret = __lock_task_sighand(tsk, flags);
3026 (void)__cond_lock(&tsk->sighand->siglock, ret);
3027 return ret;
3028}
3029
3030static inline void unlock_task_sighand(struct task_struct *tsk,
3031 unsigned long *flags)
3032{
3033 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
3034}
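
/*
 * Illustrative sketch (not from this header): lock_task_sighand() can
 * return NULL if the task is already being released, so the result must
 * be checked before touching anything protected by siglock.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		// ... task->signal and task->sighand are stable here ...
 *		unlock_task_sighand(task, &flags);
 *	}
 */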
3035
3036
/**
 * threadgroup_change_begin - mark the beginning of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * Operations which change the membership of @tsk's threadgroup (a new
 * thread joining the group, exit of a member, exec replacing the
 * leader) are wrapped by this pair so that subsystems which need a
 * stable threadgroup (cgroups, via cgroup_threadgroup_change_begin())
 * can synchronise against them.  May sleep.
 */
3047static inline void threadgroup_change_begin(struct task_struct *tsk)
3048{
3049 might_sleep();
3050 cgroup_threadgroup_change_begin(tsk);
3051}
3052
3053
/**
 * threadgroup_change_end - mark the end of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * See threadgroup_change_begin().
 */
3059static inline void threadgroup_change_end(struct task_struct *tsk)
3060{
3061 cgroup_threadgroup_change_end(tsk);
3062}
3063
3064#ifndef __HAVE_THREAD_FUNCTIONS
3065
3066#define task_thread_info(task) ((struct thread_info *)(task)->stack)
3067#define task_stack_page(task) ((task)->stack)
3068
3069static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
3070{
3071 *task_thread_info(p) = *task_thread_info(org);
3072 task_thread_info(p)->task = p;
3073}
3074
3075
/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
3084static inline unsigned long *end_of_stack(struct task_struct *p)
3085{
3086#ifdef CONFIG_STACK_GROWSUP
3087 return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
3088#else
3089 return (unsigned long *)(task_thread_info(p) + 1);
3090#endif
3091}
3092
3093#endif
3094#define task_stack_end_corrupted(task) \
3095 (*(end_of_stack(task)) != STACK_END_MAGIC)
3096
3097static inline int object_is_on_stack(void *obj)
3098{
3099 void *stack = task_stack_page(current);
3100
3101 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
3102}
3103
3104extern void thread_stack_cache_init(void);
3105
3106#ifdef CONFIG_DEBUG_STACK_USAGE
3107static inline unsigned long stack_not_used(struct task_struct *p)
3108{
3109 unsigned long *n = end_of_stack(p);
3110
3111 do {
3112# ifdef CONFIG_STACK_GROWSUP
3113 n--;
3114# else
3115 n++;
3116# endif
3117 } while (!*n);
3118
3119# ifdef CONFIG_STACK_GROWSUP
3120 return (unsigned long)end_of_stack(p) - (unsigned long)n;
3121# else
3122 return (unsigned long)n - (unsigned long)end_of_stack(p);
3123# endif
3124}
3125#endif
3126extern void set_task_stack_end_magic(struct task_struct *tsk);
3127
3128
3129
3130
3131static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
3132{
3133 set_ti_thread_flag(task_thread_info(tsk), flag);
3134}
3135
3136static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
3137{
3138 clear_ti_thread_flag(task_thread_info(tsk), flag);
3139}
3140
3141static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
3142{
3143 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
3144}
3145
3146static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
3147{
3148 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
3149}
3150
3151static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
3152{
3153 return test_ti_thread_flag(task_thread_info(tsk), flag);
3154}
3155
3156static inline void set_tsk_need_resched(struct task_struct *tsk)
3157{
3158 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
3159}
3160
3161static inline void clear_tsk_need_resched(struct task_struct *tsk)
3162{
3163 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
3164}
3165
3166static inline int test_tsk_need_resched(struct task_struct *tsk)
3167{
3168 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
3169}
3170
3171static inline int restart_syscall(void)
3172{
3173 set_tsk_thread_flag(current, TIF_SIGPENDING);
3174 return -ERESTARTNOINTR;
3175}
3176
3177static inline int signal_pending(struct task_struct *p)
3178{
3179 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
3180}
3181
3182static inline int __fatal_signal_pending(struct task_struct *p)
3183{
3184 return unlikely(sigismember(&p->pending.signal, SIGKILL));
3185}
3186
3187static inline int fatal_signal_pending(struct task_struct *p)
3188{
3189 return signal_pending(p) && __fatal_signal_pending(p);
3190}
3191
3192static inline int signal_pending_state(long state, struct task_struct *p)
3193{
3194 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
3195 return 0;
3196 if (!signal_pending(p))
3197 return 0;
3198
3199 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
3200}
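
/*
 * Illustrative sketch (not from this header): the usual way the
 * signal_pending() family is consumed by an interruptible wait loop.
 * The "done" condition is an assumption for the example.
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (done)
 *			break;
 *		if (signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */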
3201
3202
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return value
 * indicates whether a reschedule was actually done.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
3209extern int _cond_resched(void);
3210
3211#define cond_resched() ({ \
3212 ___might_sleep(__FILE__, __LINE__, 0); \
3213 _cond_resched(); \
3214})
3215
3216extern int __cond_resched_lock(spinlock_t *lock);
3217
3218#define cond_resched_lock(lock) ({ \
3219 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
3220 __cond_resched_lock(lock); \
3221})
3222
3223extern int __cond_resched_softirq(void);
3224
3225#define cond_resched_softirq() ({ \
3226 ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
3227 __cond_resched_softirq(); \
3228})
3229
3230static inline void cond_resched_rcu(void)
3231{
3232#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
3233 rcu_read_unlock();
3234 cond_resched();
3235 rcu_read_lock();
3236#endif
3237}
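
/*
 * Illustrative sketch (not from this header): sprinkling cond_resched()
 * into a long-running loop so a !CONFIG_PREEMPT kernel does not hog the
 * CPU.  The item count and the per-item helper are assumptions.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);	// hypothetical per-item work
 *		cond_resched();
 *	}
 */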
3238
3239
/*
 * Does a critical section need to be broken due to another task
 * waiting?  (Technically it does not depend on CONFIG_PREEMPT, but a
 * general need for low latency.)
 */
3244static inline int spin_needbreak(spinlock_t *lock)
3245{
3246#ifdef CONFIG_PREEMPT
3247 return spin_is_contended(lock);
3248#else
3249 return 0;
3250#endif
3251}
3252
3253
/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
3257#ifdef TIF_POLLING_NRFLAG
3258static inline int tsk_is_polling(struct task_struct *p)
3259{
3260 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
3261}
3262
3263static inline void __current_set_polling(void)
3264{
3265 set_thread_flag(TIF_POLLING_NRFLAG);
3266}
3267
3268static inline bool __must_check current_set_polling_and_test(void)
3269{
3270 __current_set_polling();
3271
	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * paired with resched_curr().
	 */
3276 smp_mb__after_atomic();
3277
3278 return unlikely(tif_need_resched());
3279}
3280
3281static inline void __current_clr_polling(void)
3282{
3283 clear_thread_flag(TIF_POLLING_NRFLAG);
3284}
3285
3286static inline bool __must_check current_clr_polling_and_test(void)
3287{
3288 __current_clr_polling();
3289
	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * paired with resched_curr().
	 */
3294 smp_mb__after_atomic();
3295
3296 return unlikely(tif_need_resched());
3297}
3298
3299#else
3300static inline int tsk_is_polling(struct task_struct *p) { return 0; }
3301static inline void __current_set_polling(void) { }
3302static inline void __current_clr_polling(void) { }
3303
3304static inline bool __must_check current_set_polling_and_test(void)
3305{
3306 return unlikely(tif_need_resched());
3307}
3308static inline bool __must_check current_clr_polling_and_test(void)
3309{
3310 return unlikely(tif_need_resched());
3311}
3312#endif
3313
3314static inline void current_clr_polling(void)
3315{
3316 __current_clr_polling();
3317
	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling
	 * bit; paired with resched_curr().
	 */
3324 smp_mb();
3325
3326 preempt_fold_need_resched();
3327}
3328
3329static __always_inline bool need_resched(void)
3330{
3331 return unlikely(tif_need_resched());
3332}
3333
3334
3335
3336
3337void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
3338void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
3339
3340
/*
 * Reevaluate whether the task has signals pending delivery.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
3346extern void recalc_sigpending_and_wake(struct task_struct *t);
3347extern void recalc_sigpending(void);
3348
3349extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
3350
3351static inline void signal_wake_up(struct task_struct *t, bool resume)
3352{
3353 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
3354}
3355static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
3356{
3357 signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
3358}
3359
3360
/*
 * Wrappers for task_thread_info(p)->cpu access; no-ops on UP.
 */
3363#ifdef CONFIG_SMP
3364
3365static inline unsigned int task_cpu(const struct task_struct *p)
3366{
3367 return task_thread_info(p)->cpu;
3368}
3369
3370static inline int task_node(const struct task_struct *p)
3371{
3372 return cpu_to_node(task_cpu(p));
3373}
3374
3375extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
3376
3377#else
3378
3379static inline unsigned int task_cpu(const struct task_struct *p)
3380{
3381 return 0;
3382}
3383
3384static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
3385{
3386}
3387
3388#endif
3389
3390extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3391extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
3392
3393#ifdef CONFIG_CGROUP_SCHED
3394extern struct task_group root_task_group;
3395#endif
3396
3397extern int task_can_switch_user(struct user_struct *up,
3398 struct task_struct *tsk);
3399
3400#ifdef CONFIG_TASK_XACCT
3401static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3402{
3403 tsk->ioac.rchar += amt;
3404}
3405
3406static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3407{
3408 tsk->ioac.wchar += amt;
3409}
3410
3411static inline void inc_syscr(struct task_struct *tsk)
3412{
3413 tsk->ioac.syscr++;
3414}
3415
3416static inline void inc_syscw(struct task_struct *tsk)
3417{
3418 tsk->ioac.syscw++;
3419}
3420#else
3421static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3422{
3423}
3424
3425static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3426{
3427}
3428
3429static inline void inc_syscr(struct task_struct *tsk)
3430{
3431}
3432
3433static inline void inc_syscw(struct task_struct *tsk)
3434{
3435}
3436#endif
3437
3438#ifndef TASK_SIZE_OF
3439#define TASK_SIZE_OF(tsk) TASK_SIZE
3440#endif
3441
3442#ifdef CONFIG_MEMCG
3443extern void mm_update_next_owner(struct mm_struct *mm);
3444#else
3445static inline void mm_update_next_owner(struct mm_struct *mm)
3446{
3447}
3448#endif
3449
3450static inline unsigned long task_rlimit(const struct task_struct *tsk,
3451 unsigned int limit)
3452{
3453 return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
3454}
3455
3456static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3457 unsigned int limit)
3458{
3459 return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
3460}
3461
3462static inline unsigned long rlimit(unsigned int limit)
3463{
3464 return task_rlimit(current, limit);
3465}
3466
3467static inline unsigned long rlimit_max(unsigned int limit)
3468{
3469 return task_rlimit_max(current, limit);
3470}
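
/*
 * Illustrative sketch (not from this header): rlimit() reads the calling
 * task's soft limit, e.g. to refuse pinning more pages than
 * RLIMIT_MEMLOCK allows.  The npages count is an assumption.
 *
 *	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *	if (npages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -ENOMEM;
 */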
3471
3472#ifdef CONFIG_CPU_FREQ
3473struct update_util_data {
3474 void (*func)(struct update_util_data *data,
3475 u64 time, unsigned long util, unsigned long max);
3476};
3477
3478void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
3479 void (*func)(struct update_util_data *data, u64 time,
3480 unsigned long util, unsigned long max));
3481void cpufreq_remove_update_util_hook(int cpu);
3482#endif
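
/*
 * Illustrative sketch (based only on the declarations above; the
 * callback body and per-CPU variable are assumptions): a cpufreq
 * governor registers a per-CPU callback that the scheduler invokes with
 * fresh utilization data.
 *
 *	static DEFINE_PER_CPU(struct update_util_data, my_update_util);
 *
 *	static void my_util_hook(struct update_util_data *data, u64 time,
 *				 unsigned long util, unsigned long max)
 *	{
 *		// e.g. pick a frequency proportional to util/max
 *	}
 *
 *	// registration, done for each CPU the governor manages:
 *	cpufreq_add_update_util_hook(cpu, &per_cpu(my_update_util, cpu),
 *				     my_util_hook);
 *	// ... and later:
 *	cpufreq_remove_update_util_hook(cpu);
 */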
3483
3484#endif
3485