1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5
6#include <linux/sched/prio.h>
7
8
9struct sched_param {
10 int sched_priority;
11};
12
13#include <asm/param.h>
14
15#include <linux/capability.h>
16#include <linux/threads.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/timex.h>
20#include <linux/jiffies.h>
21#include <linux/plist.h>
22#include <linux/rbtree.h>
23#include <linux/thread_info.h>
24#include <linux/cpumask.h>
25#include <linux/errno.h>
26#include <linux/nodemask.h>
27#include <linux/mm_types.h>
28#include <linux/preempt_mask.h>
29
30#include <asm/page.h>
31#include <asm/ptrace.h>
32#include <linux/cputime.h>
33
34#include <linux/smp.h>
35#include <linux/sem.h>
36#include <linux/signal.h>
37#include <linux/compiler.h>
38#include <linux/completion.h>
39#include <linux/pid.h>
40#include <linux/percpu.h>
41#include <linux/topology.h>
42#include <linux/proportions.h>
43#include <linux/seccomp.h>
44#include <linux/rcupdate.h>
45#include <linux/rculist.h>
46#include <linux/rtmutex.h>
47
48#include <linux/time.h>
49#include <linux/param.h>
50#include <linux/resource.h>
51#include <linux/timer.h>
52#include <linux/hrtimer.h>
53#include <linux/task_io_accounting.h>
54#include <linux/latencytop.h>
55#include <linux/cred.h>
56#include <linux/llist.h>
57#include <linux/uidgid.h>
58#include <linux/gfp.h>
59
60#include <asm/processor.h>
61
62#define SCHED_ATTR_SIZE_VER0 48

/*
 * Extended scheduling parameters, used by sched_setattr()/sched_getattr().
 *
 *  @size:		size of the structure, for forward/backward compatibility
 *  @sched_policy:	task policy (SCHED_NORMAL, SCHED_FIFO, SCHED_DEADLINE, ...)
 *  @sched_flags:	flags such as SCHED_FLAG_RESET_ON_FORK
 *  @sched_nice:	nice value, used by SCHED_NORMAL/SCHED_BATCH
 *  @sched_priority:	static priority, used by SCHED_FIFO/SCHED_RR
 *  @sched_runtime:
 *  @sched_deadline:
 *  @sched_period:	constant-bandwidth parameters (in nanoseconds) used by
 *			SCHED_DEADLINE: the task is granted @sched_runtime every
 *			@sched_period, to be consumed by @sched_deadline.
 */
108struct sched_attr {
109 u32 size;
110
111 u32 sched_policy;
112 u64 sched_flags;
113
114
115 s32 sched_nice;
116
117
118 u32 sched_priority;
119
120
121 u64 sched_runtime;
122 u64 sched_deadline;
123 u64 sched_period;
124};
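
/*
 * Editorial example (not part of the original header; the values and the
 * target task pointer "p" are arbitrary): a kernel caller could switch a
 * task to SCHED_DEADLINE with the in-kernel sched_setattr() declared later
 * in this file.  All times are in nanoseconds.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * NSEC_PER_MSEC,
 *		.sched_deadline	=  30 * NSEC_PER_MSEC,
 *		.sched_period	= 100 * NSEC_PER_MSEC,
 *	};
 *	int ret = sched_setattr(p, &attr);
 */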
125
126struct exec_domain;
127struct futex_pi_state;
128struct robust_list_head;
129struct bio_list;
130struct fs_struct;
131struct perf_event_context;
132struct blk_plug;
133struct filename;
134
135#define VMACACHE_BITS 2
136#define VMACACHE_SIZE (1U << VMACACHE_BITS)
137#define VMACACHE_MASK (VMACACHE_SIZE - 1)
138
/*
 * Global load-average bookkeeping: avenrun[] holds the 1/5/15-minute load
 * averages as fixed-point values scaled by FIXED_1 (see the constants below),
 * and get_avenrun() copies them out with an offset and shift applied.
 */
149extern unsigned long avenrun[];
150extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
151
152#define FSHIFT 11
153#define FIXED_1 (1<<FSHIFT)
154#define LOAD_FREQ (5*HZ+1)
155#define EXP_1 1884
156#define EXP_5 2014
157#define EXP_15 2037
158
159#define CALC_LOAD(load,exp,n) \
160 load *= exp; \
161 load += n*(FIXED_1-exp); \
162 load >>= FSHIFT;
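
/*
 * Editorial sketch (not part of the original header) of how CALC_LOAD() is
 * meant to be used: every LOAD_FREQ ticks, each average in avenrun[] decays
 * toward the current number of runnable plus uninterruptible tasks,
 * expressed in FIXED_1 units.  "nr_active_tasks" is a placeholder name; the
 * real update lives in kernel/sched/proc.c.
 *
 *	unsigned long active = nr_active_tasks * FIXED_1;
 *
 *	CALC_LOAD(avenrun[0], EXP_1, active);
 *	CALC_LOAD(avenrun[1], EXP_5, active);
 *	CALC_LOAD(avenrun[2], EXP_15, active);
 */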
163
164extern unsigned long total_forks;
165extern int nr_threads;
166DECLARE_PER_CPU(unsigned long, process_counts);
167extern int nr_processes(void);
168extern unsigned long nr_running(void);
169extern unsigned long nr_iowait(void);
170extern unsigned long nr_iowait_cpu(int cpu);
171extern unsigned long this_cpu_load(void);
172
173
174extern void calc_global_load(unsigned long ticks);
175extern void update_cpu_load_nohz(void);
176
177extern unsigned long get_parent_ip(unsigned long addr);
178
179extern void dump_cpu_task(int cpu);
180
181struct seq_file;
182struct cfs_rq;
183struct task_group;
184#ifdef CONFIG_SCHED_DEBUG
185extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
186extern void proc_sched_set_task(struct task_struct *p);
187extern void
188print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
189#endif

/*
 * Task state bitmask: the TASK_* values below live in task_struct::state,
 * the EXIT_* values in task_struct::exit_state.  TASK_STATE_TO_CHAR_STR
 * maps the bits, in order, to the one-letter state codes reported by /proc.
 */
201#define TASK_RUNNING 0
202#define TASK_INTERRUPTIBLE 1
203#define TASK_UNINTERRUPTIBLE 2
204#define __TASK_STOPPED 4
205#define __TASK_TRACED 8
206
207#define EXIT_DEAD 16
208#define EXIT_ZOMBIE 32
209#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
210
211#define TASK_DEAD 64
212#define TASK_WAKEKILL 128
213#define TASK_WAKING 256
214#define TASK_PARKED 512
215#define TASK_STATE_MAX 1024
216
217#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
218
219extern char ___assert_task_state[1 - 2*!!(
220 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
221
222
223#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
224#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
225#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
226
227
228#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
229#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
230
231
232#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
233 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
234 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
235
236#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
237#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
238#define task_is_stopped_or_traced(task) \
239 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
240#define task_contributes_to_load(task) \
241 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
242 (task->flags & PF_FROZEN) == 0)
243
244#define __set_task_state(tsk, state_value) \
245 do { (tsk)->state = (state_value); } while (0)
246#define set_task_state(tsk, state_value) \
247 set_mb((tsk)->state, (state_value))
248
/*
 * set_task_state()/set_current_state() include a memory barrier (set_mb())
 * so that the state change is ordered before a subsequent test of the
 * condition the task is about to sleep on, pairing with the barrier in the
 * wakeup path.  The __set_*() variants omit the barrier and may only be
 * used when that ordering does not matter.
 */
260#define __set_current_state(state_value) \
261 do { current->state = (state_value); } while (0)
262#define set_current_state(state_value) \
263 set_mb(current->state, (state_value))
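
/*
 * Editorial sketch (not part of the original header) of the canonical wait
 * loop built on set_current_state(); "condition" stands for whatever the
 * waker makes true before calling wake_up_process():
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */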
264
265
266#define TASK_COMM_LEN 16
267
268#include <linux/spinlock.h>
269
270
271
272
273
274
275
276extern rwlock_t tasklist_lock;
277extern spinlock_t mmlist_lock;
278
279struct task_struct;
280
281#ifdef CONFIG_PROVE_RCU
282extern int lockdep_tasklist_lock_is_held(void);
283#endif
284
285extern void sched_init(void);
286extern void sched_init_smp(void);
287extern asmlinkage void schedule_tail(struct task_struct *prev);
288extern void init_idle(struct task_struct *idle, int cpu);
289extern void init_idle_bootup_task(struct task_struct *idle);
290
291extern int runqueue_is_locked(int cpu);
292
293#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
294extern void nohz_balance_enter_idle(int cpu);
295extern void set_cpu_sd_state_idle(void);
296extern int get_nohz_timer_target(int pinned);
297#else
298static inline void nohz_balance_enter_idle(int cpu) { }
299static inline void set_cpu_sd_state_idle(void) { }
300static inline int get_nohz_timer_target(int pinned)
301{
302 return smp_processor_id();
303}
304#endif
305
306
307
308
309extern void show_state_filter(unsigned long state_filter);
310
311static inline void show_state(void)
312{
313 show_state_filter(0);
314}
315
316extern void show_regs(struct pt_regs *);
317
318
319
320
321
322
323extern void show_stack(struct task_struct *task, unsigned long *sp);
324
325void io_schedule(void);
326long io_schedule_timeout(long timeout);
327
328extern void cpu_init (void);
329extern void trap_init(void);
330extern void update_process_times(int user);
331extern void scheduler_tick(void);
332
333extern void sched_show_task(struct task_struct *p);
334
335#ifdef CONFIG_LOCKUP_DETECTOR
336extern void touch_softlockup_watchdog(void);
337extern void touch_softlockup_watchdog_sync(void);
338extern void touch_all_softlockup_watchdogs(void);
339extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
340 void __user *buffer,
341 size_t *lenp, loff_t *ppos);
342extern unsigned int softlockup_panic;
343void lockup_detector_init(void);
344#else
345static inline void touch_softlockup_watchdog(void)
346{
347}
348static inline void touch_softlockup_watchdog_sync(void)
349{
350}
351static inline void touch_all_softlockup_watchdogs(void)
352{
353}
354static inline void lockup_detector_init(void)
355{
356}
357#endif
358
359#ifdef CONFIG_DETECT_HUNG_TASK
360void reset_hung_task_detector(void);
361#else
362static inline void reset_hung_task_detector(void)
363{
364}
365#endif
366
367
368#define __sched __attribute__((__section__(".sched.text")))
369
370
371extern char __sched_text_start[], __sched_text_end[];
372
373
374extern int in_sched_functions(unsigned long addr);
375
376#define MAX_SCHEDULE_TIMEOUT LONG_MAX
377extern signed long schedule_timeout(signed long timeout);
378extern signed long schedule_timeout_interruptible(signed long timeout);
379extern signed long schedule_timeout_killable(signed long timeout);
380extern signed long schedule_timeout_uninterruptible(signed long timeout);
381asmlinkage void schedule(void);
382extern void schedule_preempt_disabled(void);
383
384struct nsproxy;
385struct user_namespace;
386
387#ifdef CONFIG_MMU
388extern void arch_pick_mmap_layout(struct mm_struct *mm);
389extern unsigned long
390arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
391 unsigned long, unsigned long);
392extern unsigned long
393arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
394 unsigned long len, unsigned long pgoff,
395 unsigned long flags);
396#else
397static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
398#endif
399
400#define SUID_DUMP_DISABLE 0
401#define SUID_DUMP_USER 1
402#define SUID_DUMP_ROOT 2
403
404
405
406
407#define MMF_DUMPABLE_BITS 2
408#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
409
410extern void set_dumpable(struct mm_struct *mm, int value);
411
412
413
414
415
416
417static inline int __get_dumpable(unsigned long mm_flags)
418{
419 return mm_flags & MMF_DUMPABLE_MASK;
420}
421
422static inline int get_dumpable(struct mm_struct *mm)
423{
424 return __get_dumpable(mm->flags);
425}
426
427
428#define MMF_DUMP_ANON_PRIVATE 2
429#define MMF_DUMP_ANON_SHARED 3
430#define MMF_DUMP_MAPPED_PRIVATE 4
431#define MMF_DUMP_MAPPED_SHARED 5
432#define MMF_DUMP_ELF_HEADERS 6
433#define MMF_DUMP_HUGETLB_PRIVATE 7
434#define MMF_DUMP_HUGETLB_SHARED 8
435
436#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
437#define MMF_DUMP_FILTER_BITS 7
438#define MMF_DUMP_FILTER_MASK \
439 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
440#define MMF_DUMP_FILTER_DEFAULT \
441 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
442 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
443
444#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
445# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
446#else
447# define MMF_DUMP_MASK_DEFAULT_ELF 0
448#endif
449
450#define MMF_VM_MERGEABLE 16
451#define MMF_VM_HUGEPAGE 17
452#define MMF_EXE_FILE_CHANGED 18
453
454#define MMF_HAS_UPROBES 19
455#define MMF_RECALC_UPROBES 20
456
457#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
458
459struct sighand_struct {
460 atomic_t count;
461 struct k_sigaction action[_NSIG];
462 spinlock_t siglock;
463 wait_queue_head_t signalfd_wqh;
464};
465
466struct pacct_struct {
467 int ac_flag;
468 long ac_exitcode;
469 unsigned long ac_mem;
470 cputime_t ac_utime, ac_stime;
471 unsigned long ac_minflt, ac_majflt;
472};
473
474struct cpu_itimer {
475 cputime_t expires;
476 cputime_t incr;
477 u32 error;
478 u32 incr_error;
479};
480
481
482
483
484
485
486
487
488struct cputime {
489 cputime_t utime;
490 cputime_t stime;
491};
492
/*
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode
 * @stime:		time spent in kernel mode
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * Used both per task and aggregated per thread group, mainly for POSIX CPU
 * timer expiry checks (see the cputime_expires fields further down).
 */
507struct task_cputime {
508 cputime_t utime;
509 cputime_t stime;
510 unsigned long long sum_exec_runtime;
511};
512
513#define prof_exp stime
514#define virt_exp utime
515#define sched_exp sum_exec_runtime
516
517#define INIT_CPUTIME \
518 (struct task_cputime) { \
519 .utime = 0, \
520 .stime = 0, \
521 .sum_exec_runtime = 0, \
522 }
523
524#ifdef CONFIG_PREEMPT_COUNT
525#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
526#else
527#define PREEMPT_DISABLED PREEMPT_ENABLED
528#endif
529
530
531
532
533
534
535
536
537#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE)
538
539
540
541
542
543
544
545
546
547
548
549struct thread_group_cputimer {
550 struct task_cputime cputime;
551 int running;
552 raw_spinlock_t lock;
553};
554
555#include <linux/rwsem.h>
556struct autogroup;
557
558
559
560
561
562
563
564
565struct signal_struct {
566 atomic_t sigcnt;
567 atomic_t live;
568 int nr_threads;
569 struct list_head thread_head;
570
571 wait_queue_head_t wait_chldexit;
572
573
574 struct task_struct *curr_target;
575
576
577 struct sigpending shared_pending;
578
579
580 int group_exit_code;
581
582
583
584
585
586 int notify_count;
587 struct task_struct *group_exit_task;
588
589
590 int group_stop_count;
591 unsigned int flags;
592
593
594
595
596
597
598
599
600
601
602 unsigned int is_child_subreaper:1;
603 unsigned int has_child_subreaper:1;
604
605
606 int posix_timer_id;
607 struct list_head posix_timers;
608
609
610 struct hrtimer real_timer;
611 struct pid *leader_pid;
612 ktime_t it_real_incr;
613
614
615
616
617
618
619 struct cpu_itimer it[2];
620
621
622
623
624
625 struct thread_group_cputimer cputimer;
626
627
628 struct task_cputime cputime_expires;
629
630 struct list_head cpu_timers[3];
631
632 struct pid *tty_old_pgrp;
633
634
635 int leader;
636
637 struct tty_struct *tty;
638
639#ifdef CONFIG_SCHED_AUTOGROUP
640 struct autogroup *autogroup;
641#endif
642
643
644
645
646
647
648 cputime_t utime, stime, cutime, cstime;
649 cputime_t gtime;
650 cputime_t cgtime;
651#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
652 struct cputime prev_cputime;
653#endif
654 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
655 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
656 unsigned long inblock, oublock, cinblock, coublock;
657 unsigned long maxrss, cmaxrss;
658 struct task_io_accounting ioac;
659
660
661
662
663
664
665
666 unsigned long long sum_sched_runtime;
667
668
669
670
671
672
673
674
675
676
677 struct rlimit rlim[RLIM_NLIMITS];
678
679#ifdef CONFIG_BSD_PROCESS_ACCT
680 struct pacct_struct pacct;
681#endif
682#ifdef CONFIG_TASKSTATS
683 struct taskstats *stats;
684#endif
685#ifdef CONFIG_AUDIT
686 unsigned audit_tty;
687 unsigned audit_tty_log_passwd;
688 struct tty_audit_buf *tty_audit_buf;
689#endif
690#ifdef CONFIG_CGROUPS
691
692
693
694
695
696
697
698
699
700 struct rw_semaphore group_rwsem;
701#endif
702
703 oom_flags_t oom_flags;
704 short oom_score_adj;
705 short oom_score_adj_min;
706
707
708 struct mutex cred_guard_mutex;
709
710
711};
712
713
714
715
716#define SIGNAL_STOP_STOPPED 0x00000001
717#define SIGNAL_STOP_CONTINUED 0x00000002
718#define SIGNAL_GROUP_EXIT 0x00000004
719#define SIGNAL_GROUP_COREDUMP 0x00000008
720
721
722
723#define SIGNAL_CLD_STOPPED 0x00000010
724#define SIGNAL_CLD_CONTINUED 0x00000020
725#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
726
727#define SIGNAL_UNKILLABLE 0x00000040
728
729
730static inline int signal_group_exit(const struct signal_struct *sig)
731{
732 return (sig->flags & SIGNAL_GROUP_EXIT) ||
733 (sig->group_exit_task != NULL);
734}
735
736
737
738
739struct user_struct {
740 atomic_t __count;
741 atomic_t processes;
742 atomic_t sigpending;
743#ifdef CONFIG_INOTIFY_USER
744 atomic_t inotify_watches;
745 atomic_t inotify_devs;
746#endif
747#ifdef CONFIG_FANOTIFY
748 atomic_t fanotify_listeners;
749#endif
750#ifdef CONFIG_EPOLL
751 atomic_long_t epoll_watches;
752#endif
753#ifdef CONFIG_POSIX_MQUEUE
754
755 unsigned long mq_bytes;
756#endif
757 unsigned long locked_shm;
758
759#ifdef CONFIG_KEYS
760 struct key *uid_keyring;
761 struct key *session_keyring;
762#endif
763
764
765 struct hlist_node uidhash_node;
766 kuid_t uid;
767
768#ifdef CONFIG_PERF_EVENTS
769 atomic_long_t locked_vm;
770#endif
771};
772
773extern int uids_sysfs_init(void);
774
775extern struct user_struct *find_user(kuid_t);
776
777extern struct user_struct root_user;
778#define INIT_USER (&root_user)
779
780
781struct backing_dev_info;
782struct reclaim_state;
783
784#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
785struct sched_info {
786
787 unsigned long pcount;
788 unsigned long long run_delay;
789
790
791 unsigned long long last_arrival,
792 last_queued;
793};
794#endif
795
796#ifdef CONFIG_TASK_DELAY_ACCT
797struct task_delay_info {
798 spinlock_t lock;
799 unsigned int flags;
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816 struct timespec blkio_start, blkio_end;
817 u64 blkio_delay;
818 u64 swapin_delay;
819 u32 blkio_count;
820
821 u32 swapin_count;
822
823
824 struct timespec freepages_start, freepages_end;
825 u64 freepages_delay;
826 u32 freepages_count;
827};
828#endif
829
830static inline int sched_info_on(void)
831{
832#ifdef CONFIG_SCHEDSTATS
833 return 1;
834#elif defined(CONFIG_TASK_DELAY_ACCT)
835 extern int delayacct_on;
836 return delayacct_on;
837#else
838 return 0;
839#endif
840}
841
842enum cpu_idle_type {
843 CPU_IDLE,
844 CPU_NOT_IDLE,
845 CPU_NEWLY_IDLE,
846 CPU_MAX_IDLE_TYPES
847};
848
849
850
851
852#define SCHED_CAPACITY_SHIFT 10
853#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
854
855
856
857
858#ifdef CONFIG_SMP
859#define SD_LOAD_BALANCE 0x0001
860#define SD_BALANCE_NEWIDLE 0x0002
861#define SD_BALANCE_EXEC 0x0004
862#define SD_BALANCE_FORK 0x0008
863#define SD_BALANCE_WAKE 0x0010
864#define SD_WAKE_AFFINE 0x0020
865#define SD_SHARE_CPUCAPACITY 0x0080
866#define SD_SHARE_POWERDOMAIN 0x0100
867#define SD_SHARE_PKG_RESOURCES 0x0200
868#define SD_SERIALIZE 0x0400
869#define SD_ASYM_PACKING 0x0800
870#define SD_PREFER_SIBLING 0x1000
871#define SD_OVERLAP 0x2000
872#define SD_NUMA 0x4000
873
874#ifdef CONFIG_SCHED_SMT
875static inline int cpu_smt_flags(void)
876{
877 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
878}
879#endif
880
881#ifdef CONFIG_SCHED_MC
882static inline int cpu_core_flags(void)
883{
884 return SD_SHARE_PKG_RESOURCES;
885}
886#endif
887
888#ifdef CONFIG_NUMA
889static inline int cpu_numa_flags(void)
890{
891 return SD_NUMA;
892}
893#endif
894
895struct sched_domain_attr {
896 int relax_domain_level;
897};
898
899#define SD_ATTR_INIT (struct sched_domain_attr) { \
900 .relax_domain_level = -1, \
901}
902
903extern int sched_domain_level_max;
904
905struct sched_group;
906
907struct sched_domain {
908
909 struct sched_domain *parent;
910 struct sched_domain *child;
911 struct sched_group *groups;
912 unsigned long min_interval;
913 unsigned long max_interval;
914 unsigned int busy_factor;
915 unsigned int imbalance_pct;
916 unsigned int cache_nice_tries;
917 unsigned int busy_idx;
918 unsigned int idle_idx;
919 unsigned int newidle_idx;
920 unsigned int wake_idx;
921 unsigned int forkexec_idx;
922 unsigned int smt_gain;
923
924 int nohz_idle;
925 int flags;
926 int level;
927
928
929 unsigned long last_balance;
930 unsigned int balance_interval;
931 unsigned int nr_balance_failed;
932
933
934 u64 max_newidle_lb_cost;
935 unsigned long next_decay_max_lb_cost;
936
937#ifdef CONFIG_SCHEDSTATS
938
939 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
940 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
941 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
942 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
943 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
944 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
945 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
946 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
947
948
949 unsigned int alb_count;
950 unsigned int alb_failed;
951 unsigned int alb_pushed;
952
953
954 unsigned int sbe_count;
955 unsigned int sbe_balanced;
956 unsigned int sbe_pushed;
957
958
959 unsigned int sbf_count;
960 unsigned int sbf_balanced;
961 unsigned int sbf_pushed;
962
963
964 unsigned int ttwu_wake_remote;
965 unsigned int ttwu_move_affine;
966 unsigned int ttwu_move_balance;
967#endif
968#ifdef CONFIG_SCHED_DEBUG
969 char *name;
970#endif
971 union {
972 void *private;
973 struct rcu_head rcu;
974 };
975
976 unsigned int span_weight;
977
978
979
980
981
982
983
984 unsigned long span[0];
985};
986
987static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
988{
989 return to_cpumask(sd->span);
990}
991
992extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
993 struct sched_domain_attr *dattr_new);
994
995
996cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
997void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
998
999bool cpus_share_cache(int this_cpu, int that_cpu);
1000
1001typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1002typedef int (*sched_domain_flags_f)(void);
1003
1004#define SDTL_OVERLAP 0x01
1005
1006struct sd_data {
1007 struct sched_domain **__percpu sd;
1008 struct sched_group **__percpu sg;
1009 struct sched_group_capacity **__percpu sgc;
1010};
1011
1012struct sched_domain_topology_level {
1013 sched_domain_mask_f mask;
1014 sched_domain_flags_f sd_flags;
1015 int flags;
1016 int numa_level;
1017 struct sd_data data;
1018#ifdef CONFIG_SCHED_DEBUG
1019 char *name;
1020#endif
1021};
1022
1023extern struct sched_domain_topology_level *sched_domain_topology;
1024
1025extern void set_sched_topology(struct sched_domain_topology_level *tl);
1026
1027#ifdef CONFIG_SCHED_DEBUG
1028# define SD_INIT_NAME(type) .name = #type
1029#else
1030# define SD_INIT_NAME(type)
1031#endif
1032
1033#else
1034
1035struct sched_domain_attr;
1036
1037static inline void
1038partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1039 struct sched_domain_attr *dattr_new)
1040{
1041}
1042
1043static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1044{
1045 return true;
1046}
1047
1048#endif
1049
1050
1051struct io_context;
1052
1053
1054#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1055extern void prefetch_stack(struct task_struct *t);
1056#else
1057static inline void prefetch_stack(struct task_struct *t) { }
1058#endif
1059
1060struct audit_context;
1061struct mempolicy;
1062struct pipe_inode_info;
1063struct uts_namespace;
1064
1065struct load_weight {
1066 unsigned long weight;
1067 u32 inv_weight;
1068};
1069
1070struct sched_avg {
1071
1072
1073
1074
1075
1076 u32 runnable_avg_sum, runnable_avg_period;
1077 u64 last_runnable_update;
1078 s64 decay_count;
1079 unsigned long load_avg_contrib;
1080};
1081
1082#ifdef CONFIG_SCHEDSTATS
1083struct sched_statistics {
1084 u64 wait_start;
1085 u64 wait_max;
1086 u64 wait_count;
1087 u64 wait_sum;
1088 u64 iowait_count;
1089 u64 iowait_sum;
1090
1091 u64 sleep_start;
1092 u64 sleep_max;
1093 s64 sum_sleep_runtime;
1094
1095 u64 block_start;
1096 u64 block_max;
1097 u64 exec_max;
1098 u64 slice_max;
1099
1100 u64 nr_migrations_cold;
1101 u64 nr_failed_migrations_affine;
1102 u64 nr_failed_migrations_running;
1103 u64 nr_failed_migrations_hot;
1104 u64 nr_forced_migrations;
1105
1106 u64 nr_wakeups;
1107 u64 nr_wakeups_sync;
1108 u64 nr_wakeups_migrate;
1109 u64 nr_wakeups_local;
1110 u64 nr_wakeups_remote;
1111 u64 nr_wakeups_affine;
1112 u64 nr_wakeups_affine_attempts;
1113 u64 nr_wakeups_passive;
1114 u64 nr_wakeups_idle;
1115};
1116#endif
1117
1118struct sched_entity {
1119 struct load_weight load;
1120 struct rb_node run_node;
1121 struct list_head group_node;
1122 unsigned int on_rq;
1123
1124 u64 exec_start;
1125 u64 sum_exec_runtime;
1126 u64 vruntime;
1127 u64 prev_sum_exec_runtime;
1128
1129 u64 nr_migrations;
1130
1131#ifdef CONFIG_SCHEDSTATS
1132 struct sched_statistics statistics;
1133#endif
1134
1135#ifdef CONFIG_FAIR_GROUP_SCHED
1136 int depth;
1137 struct sched_entity *parent;
1138
1139 struct cfs_rq *cfs_rq;
1140
1141 struct cfs_rq *my_q;
1142#endif
1143
1144#ifdef CONFIG_SMP
1145
1146 struct sched_avg avg;
1147#endif
1148};
1149
1150struct sched_rt_entity {
1151 struct list_head run_list;
1152 unsigned long timeout;
1153 unsigned long watchdog_stamp;
1154 unsigned int time_slice;
1155
1156 struct sched_rt_entity *back;
1157#ifdef CONFIG_RT_GROUP_SCHED
1158 struct sched_rt_entity *parent;
1159
1160 struct rt_rq *rt_rq;
1161
1162 struct rt_rq *my_q;
1163#endif
1164};
1165
1166struct sched_dl_entity {
1167 struct rb_node rb_node;
1168
1169
1170
1171
1172
1173
1174 u64 dl_runtime;
1175 u64 dl_deadline;
1176 u64 dl_period;
1177 u64 dl_bw;
1178
1179
1180
1181
1182
1183
1184 s64 runtime;
1185 u64 deadline;
1186 unsigned int flags;
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206 int dl_throttled, dl_new, dl_boosted, dl_yielded;
1207
1208
1209
1210
1211
1212 struct hrtimer dl_timer;
1213};
1214
1215struct rcu_node;
1216
1217enum perf_event_task_context {
1218 perf_invalid_context = -1,
1219 perf_hw_context = 0,
1220 perf_sw_context,
1221 perf_nr_task_contexts,
1222};
1223
1224struct task_struct {
1225 volatile long state;
1226 void *stack;
1227 atomic_t usage;
1228 unsigned int flags;
1229 unsigned int ptrace;
1230
1231#ifdef CONFIG_SMP
1232 struct llist_node wake_entry;
1233 int on_cpu;
1234 struct task_struct *last_wakee;
1235 unsigned long wakee_flips;
1236 unsigned long wakee_flip_decay_ts;
1237
1238 int wake_cpu;
1239#endif
1240 int on_rq;
1241
1242 int prio, static_prio, normal_prio;
1243 unsigned int rt_priority;
1244 const struct sched_class *sched_class;
1245 struct sched_entity se;
1246 struct sched_rt_entity rt;
1247#ifdef CONFIG_CGROUP_SCHED
1248 struct task_group *sched_task_group;
1249#endif
1250 struct sched_dl_entity dl;
1251
1252#ifdef CONFIG_PREEMPT_NOTIFIERS
1253
1254 struct hlist_head preempt_notifiers;
1255#endif
1256
1257#ifdef CONFIG_BLK_DEV_IO_TRACE
1258 unsigned int btrace_seq;
1259#endif
1260
1261 unsigned int policy;
1262 int nr_cpus_allowed;
1263 cpumask_t cpus_allowed;
1264
1265#ifdef CONFIG_PREEMPT_RCU
1266 int rcu_read_lock_nesting;
1267 char rcu_read_unlock_special;
1268 struct list_head rcu_node_entry;
1269#endif
1270#ifdef CONFIG_TREE_PREEMPT_RCU
1271 struct rcu_node *rcu_blocked_node;
1272#endif
1273#ifdef CONFIG_RCU_BOOST
1274 struct rt_mutex *rcu_boost_mutex;
1275#endif
1276
1277#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1278 struct sched_info sched_info;
1279#endif
1280
1281 struct list_head tasks;
1282#ifdef CONFIG_SMP
1283 struct plist_node pushable_tasks;
1284 struct rb_node pushable_dl_tasks;
1285#endif
1286
1287 struct mm_struct *mm, *active_mm;
1288#ifdef CONFIG_COMPAT_BRK
1289 unsigned brk_randomized:1;
1290#endif
1291
1292 u32 vmacache_seqnum;
1293 struct vm_area_struct *vmacache[VMACACHE_SIZE];
1294#if defined(SPLIT_RSS_COUNTING)
1295 struct task_rss_stat rss_stat;
1296#endif
1297
1298 int exit_state;
1299 int exit_code, exit_signal;
1300 int pdeath_signal;
1301 unsigned int jobctl;
1302
1303
1304 unsigned int personality;
1305
1306 unsigned in_execve:1;
1307
1308 unsigned in_iowait:1;
1309
1310
1311 unsigned no_new_privs:1;
1312
1313
1314 unsigned sched_reset_on_fork:1;
1315 unsigned sched_contributes_to_load:1;
1316
1317 pid_t pid;
1318 pid_t tgid;
1319
1320#ifdef CONFIG_CC_STACKPROTECTOR
1321
1322 unsigned long stack_canary;
1323#endif
1324
1325
1326
1327
1328
1329 struct task_struct __rcu *real_parent;
1330 struct task_struct __rcu *parent;
1331
1332
1333
1334 struct list_head children;
1335 struct list_head sibling;
1336 struct task_struct *group_leader;
1337
1338
1339
1340
1341
1342
1343 struct list_head ptraced;
1344 struct list_head ptrace_entry;
1345
1346
1347 struct pid_link pids[PIDTYPE_MAX];
1348 struct list_head thread_group;
1349 struct list_head thread_node;
1350
1351 struct completion *vfork_done;
1352 int __user *set_child_tid;
1353 int __user *clear_child_tid;
1354
1355 cputime_t utime, stime, utimescaled, stimescaled;
1356 cputime_t gtime;
1357#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1358 struct cputime prev_cputime;
1359#endif
1360#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1361 seqlock_t vtime_seqlock;
1362 unsigned long long vtime_snap;
1363 enum {
1364 VTIME_SLEEPING = 0,
1365 VTIME_USER,
1366 VTIME_SYS,
1367 } vtime_snap_whence;
1368#endif
1369 unsigned long nvcsw, nivcsw;
1370 struct timespec start_time;
1371 struct timespec real_start_time;
1372
1373 unsigned long min_flt, maj_flt;
1374
1375 struct task_cputime cputime_expires;
1376 struct list_head cpu_timers[3];
1377
1378
1379 const struct cred __rcu *real_cred;
1380
1381 const struct cred __rcu *cred;
1382
1383 char comm[TASK_COMM_LEN];
1384
1385
1386
1387
1388 int link_count, total_link_count;
1389#ifdef CONFIG_SYSVIPC
1390
1391 struct sysv_sem sysvsem;
1392#endif
1393#ifdef CONFIG_DETECT_HUNG_TASK
1394
1395 unsigned long last_switch_count;
1396#endif
1397
1398 struct thread_struct thread;
1399
1400 struct fs_struct *fs;
1401
1402 struct files_struct *files;
1403
1404 struct nsproxy *nsproxy;
1405
1406 struct signal_struct *signal;
1407 struct sighand_struct *sighand;
1408
1409 sigset_t blocked, real_blocked;
1410 sigset_t saved_sigmask;
1411 struct sigpending pending;
1412
1413 unsigned long sas_ss_sp;
1414 size_t sas_ss_size;
1415 int (*notifier)(void *priv);
1416 void *notifier_data;
1417 sigset_t *notifier_mask;
1418 struct callback_head *task_works;
1419
1420 struct audit_context *audit_context;
1421#ifdef CONFIG_AUDITSYSCALL
1422 kuid_t loginuid;
1423 unsigned int sessionid;
1424#endif
1425 struct seccomp seccomp;
1426
1427
1428 u32 parent_exec_id;
1429 u32 self_exec_id;
1430
1431
1432 spinlock_t alloc_lock;
1433
1434
1435 raw_spinlock_t pi_lock;
1436
1437#ifdef CONFIG_RT_MUTEXES
1438
1439 struct rb_root pi_waiters;
1440 struct rb_node *pi_waiters_leftmost;
1441
1442 struct rt_mutex_waiter *pi_blocked_on;
1443
1444 struct task_struct *pi_top_task;
1445#endif
1446
1447#ifdef CONFIG_DEBUG_MUTEXES
1448
1449 struct mutex_waiter *blocked_on;
1450#endif
1451#ifdef CONFIG_TRACE_IRQFLAGS
1452 unsigned int irq_events;
1453 unsigned long hardirq_enable_ip;
1454 unsigned long hardirq_disable_ip;
1455 unsigned int hardirq_enable_event;
1456 unsigned int hardirq_disable_event;
1457 int hardirqs_enabled;
1458 int hardirq_context;
1459 unsigned long softirq_disable_ip;
1460 unsigned long softirq_enable_ip;
1461 unsigned int softirq_disable_event;
1462 unsigned int softirq_enable_event;
1463 int softirqs_enabled;
1464 int softirq_context;
1465#endif
1466#ifdef CONFIG_LOCKDEP
1467# define MAX_LOCK_DEPTH 48UL
1468 u64 curr_chain_key;
1469 int lockdep_depth;
1470 unsigned int lockdep_recursion;
1471 struct held_lock held_locks[MAX_LOCK_DEPTH];
1472 gfp_t lockdep_reclaim_gfp;
1473#endif
1474
1475
1476 void *journal_info;
1477
1478
1479 struct bio_list *bio_list;
1480
1481#ifdef CONFIG_BLOCK
1482
1483 struct blk_plug *plug;
1484#endif
1485
1486
1487 struct reclaim_state *reclaim_state;
1488
1489 struct backing_dev_info *backing_dev_info;
1490
1491 struct io_context *io_context;
1492
1493 unsigned long ptrace_message;
1494 siginfo_t *last_siginfo;
1495 struct task_io_accounting ioac;
1496#if defined(CONFIG_TASK_XACCT)
1497 u64 acct_rss_mem1;
1498 u64 acct_vm_mem1;
1499 cputime_t acct_timexpd;
1500#endif
1501#ifdef CONFIG_CPUSETS
1502 nodemask_t mems_allowed;
1503 seqcount_t mems_allowed_seq;
1504 int cpuset_mem_spread_rotor;
1505 int cpuset_slab_spread_rotor;
1506#endif
1507#ifdef CONFIG_CGROUPS
1508
1509 struct css_set __rcu *cgroups;
1510
1511 struct list_head cg_list;
1512#endif
1513#ifdef CONFIG_FUTEX
1514 struct robust_list_head __user *robust_list;
1515#ifdef CONFIG_COMPAT
1516 struct compat_robust_list_head __user *compat_robust_list;
1517#endif
1518 struct list_head pi_state_list;
1519 struct futex_pi_state *pi_state_cache;
1520#endif
1521#ifdef CONFIG_PERF_EVENTS
1522 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1523 struct mutex perf_event_mutex;
1524 struct list_head perf_event_list;
1525#endif
1526#ifdef CONFIG_DEBUG_PREEMPT
1527 unsigned long preempt_disable_ip;
1528#endif
1529#ifdef CONFIG_NUMA
1530 struct mempolicy *mempolicy;
1531 short il_next;
1532 short pref_node_fork;
1533#endif
1534#ifdef CONFIG_NUMA_BALANCING
1535 int numa_scan_seq;
1536 unsigned int numa_scan_period;
1537 unsigned int numa_scan_period_max;
1538 int numa_preferred_nid;
1539 unsigned long numa_migrate_retry;
1540 u64 node_stamp;
1541 u64 last_task_numa_placement;
1542 u64 last_sum_exec_runtime;
1543 struct callback_head numa_work;
1544
1545 struct list_head numa_entry;
1546 struct numa_group *numa_group;
1547
1548
1549
1550
1551
1552
1553 unsigned long *numa_faults_memory;
1554 unsigned long total_numa_faults;
1555
1556
1557
1558
1559
1560
1561 unsigned long *numa_faults_buffer_memory;
1562
1563
1564
1565
1566
1567 unsigned long *numa_faults_cpu;
1568 unsigned long *numa_faults_buffer_cpu;
1569
1570
1571
1572
1573
1574
1575
1576 unsigned long numa_faults_locality[2];
1577
1578 unsigned long numa_pages_migrated;
1579#endif
1580
1581 struct rcu_head rcu;
1582
1583
1584
1585
1586 struct pipe_inode_info *splice_pipe;
1587
1588 struct page_frag task_frag;
1589
1590#ifdef CONFIG_TASK_DELAY_ACCT
1591 struct task_delay_info *delays;
1592#endif
1593#ifdef CONFIG_FAULT_INJECTION
1594 int make_it_fail;
1595#endif
1596
1597
1598
1599
1600 int nr_dirtied;
1601 int nr_dirtied_pause;
1602 unsigned long dirty_paused_when;
1603
1604#ifdef CONFIG_LATENCYTOP
1605 int latency_record_count;
1606 struct latency_record latency_record[LT_SAVECOUNT];
1607#endif
1608
1609
1610
1611
1612 unsigned long timer_slack_ns;
1613 unsigned long default_timer_slack_ns;
1614
1615#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1616
1617 int curr_ret_stack;
1618
1619 struct ftrace_ret_stack *ret_stack;
1620
1621 unsigned long long ftrace_timestamp;
1622
1623
1624
1625
1626 atomic_t trace_overrun;
1627
1628 atomic_t tracing_graph_pause;
1629#endif
1630#ifdef CONFIG_TRACING
1631
1632 unsigned long trace;
1633
1634 unsigned long trace_recursion;
1635#endif
1636#ifdef CONFIG_MEMCG
1637 struct memcg_batch_info {
1638 int do_batch;
1639 struct mem_cgroup *memcg;
1640 unsigned long nr_pages;
1641 unsigned long memsw_nr_pages;
1642 } memcg_batch;
1643 unsigned int memcg_kmem_skip_account;
1644 struct memcg_oom_info {
1645 struct mem_cgroup *memcg;
1646 gfp_t gfp_mask;
1647 int order;
1648 unsigned int may_oom:1;
1649 } memcg_oom;
1650#endif
1651#ifdef CONFIG_UPROBES
1652 struct uprobe_task *utask;
1653#endif
1654#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1655 unsigned int sequential_io;
1656 unsigned int sequential_io_avg;
1657#endif
1658};
1659
1660
1661#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1662
1663#define TNF_MIGRATED 0x01
1664#define TNF_NO_GROUP 0x02
1665#define TNF_SHARED 0x04
1666#define TNF_FAULT_LOCAL 0x08
1667
1668#ifdef CONFIG_NUMA_BALANCING
1669extern void task_numa_fault(int last_node, int node, int pages, int flags);
1670extern pid_t task_numa_group_id(struct task_struct *p);
1671extern void set_numabalancing_state(bool enabled);
1672extern void task_numa_free(struct task_struct *p);
1673extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1674 int src_nid, int dst_cpu);
1675#else
1676static inline void task_numa_fault(int last_node, int node, int pages,
1677 int flags)
1678{
1679}
1680static inline pid_t task_numa_group_id(struct task_struct *p)
1681{
1682 return 0;
1683}
1684static inline void set_numabalancing_state(bool enabled)
1685{
1686}
1687static inline void task_numa_free(struct task_struct *p)
1688{
1689}
1690static inline bool should_numa_migrate_memory(struct task_struct *p,
1691 struct page *page, int src_nid, int dst_cpu)
1692{
1693 return true;
1694}
1695#endif
1696
1697static inline struct pid *task_pid(struct task_struct *task)
1698{
1699 return task->pids[PIDTYPE_PID].pid;
1700}
1701
1702static inline struct pid *task_tgid(struct task_struct *task)
1703{
1704 return task->group_leader->pids[PIDTYPE_PID].pid;
1705}
1706
1707
1708
1709
1710
1711
1712static inline struct pid *task_pgrp(struct task_struct *task)
1713{
1714 return task->group_leader->pids[PIDTYPE_PGID].pid;
1715}
1716
1717static inline struct pid *task_session(struct task_struct *task)
1718{
1719 return task->group_leader->pids[PIDTYPE_SID].pid;
1720}
1721
1722struct pid_namespace;

/*
 * Helpers returning a task's pid/tgid/pgrp/session id as seen from different
 * pid namespaces:
 *
 *   task_xid_nr()     : global id, i.e. the id seen from the init namespace
 *   task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace
 *                       of current
 *   task_xid_nr_ns()  : id seen from the pid namespace specified by @ns
 */
1737pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1738 struct pid_namespace *ns);
1739
1740static inline pid_t task_pid_nr(struct task_struct *tsk)
1741{
1742 return tsk->pid;
1743}
1744
1745static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1746 struct pid_namespace *ns)
1747{
1748 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1749}
1750
1751static inline pid_t task_pid_vnr(struct task_struct *tsk)
1752{
1753 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1754}
1755
1756
1757static inline pid_t task_tgid_nr(struct task_struct *tsk)
1758{
1759 return tsk->tgid;
1760}
1761
1762pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1763
1764static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1765{
1766 return pid_vnr(task_tgid(tsk));
1767}
1768
1769
1770static inline int pid_alive(const struct task_struct *p);
1771static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1772{
1773 pid_t pid = 0;
1774
1775 rcu_read_lock();
1776 if (pid_alive(tsk))
1777 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1778 rcu_read_unlock();
1779
1780 return pid;
1781}
1782
1783static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1784{
1785 return task_ppid_nr_ns(tsk, &init_pid_ns);
1786}
1787
1788static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1789 struct pid_namespace *ns)
1790{
1791 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1792}
1793
1794static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1795{
1796 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1797}
1798
1799
1800static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1801 struct pid_namespace *ns)
1802{
1803 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1804}
1805
1806static inline pid_t task_session_vnr(struct task_struct *tsk)
1807{
1808 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1809}
1810
1811
1812static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1813{
1814 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1815}
1816
/*
 * pid_alive - check that a task structure is not stale
 * @p: task structure to be checked
 *
 * Test that a process is not yet dead (at most zombie state).  If
 * pid_alive() fails, the pid-related pointers in the task structure may
 * already be stale and must not be dereferenced.
 */
1827static inline int pid_alive(const struct task_struct *p)
1828{
1829 return p->pids[PIDTYPE_PID].pid != NULL;
1830}
1831
/*
 * is_global_init - check whether a task is the init process, i.e. the first
 * user-space task created by the kernel (pid 1 in the initial namespace).
 */
1840static inline int is_global_init(struct task_struct *tsk)
1841{
1842 return tsk->pid == 1;
1843}
1844
1845extern struct pid *cad_pid;
1846
1847extern void free_task(struct task_struct *tsk);
1848#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1849
1850extern void __put_task_struct(struct task_struct *t);
1851
1852static inline void put_task_struct(struct task_struct *t)
1853{
1854 if (atomic_dec_and_test(&t->usage))
1855 __put_task_struct(t);
1856}
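
/*
 * Editorial usage sketch (not part of the original header): pin a task over
 * a region where the reference that made the pointer valid may go away, then
 * drop the reference; the task is freed once the last reference is gone.
 *
 *	get_task_struct(tsk);
 *	...
 *	put_task_struct(tsk);
 */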
1857
1858#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1859extern void task_cputime(struct task_struct *t,
1860 cputime_t *utime, cputime_t *stime);
1861extern void task_cputime_scaled(struct task_struct *t,
1862 cputime_t *utimescaled, cputime_t *stimescaled);
1863extern cputime_t task_gtime(struct task_struct *t);
1864#else
1865static inline void task_cputime(struct task_struct *t,
1866 cputime_t *utime, cputime_t *stime)
1867{
1868 if (utime)
1869 *utime = t->utime;
1870 if (stime)
1871 *stime = t->stime;
1872}
1873
1874static inline void task_cputime_scaled(struct task_struct *t,
1875 cputime_t *utimescaled,
1876 cputime_t *stimescaled)
1877{
1878 if (utimescaled)
1879 *utimescaled = t->utimescaled;
1880 if (stimescaled)
1881 *stimescaled = t->stimescaled;
1882}
1883
1884static inline cputime_t task_gtime(struct task_struct *t)
1885{
1886 return t->gtime;
1887}
1888#endif
1889extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1890extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1891
1892
1893
1894
1895#define PF_EXITING 0x00000004
1896#define PF_EXITPIDONE 0x00000008
1897#define PF_VCPU 0x00000010
1898#define PF_WQ_WORKER 0x00000020
1899#define PF_FORKNOEXEC 0x00000040
1900#define PF_MCE_PROCESS 0x00000080
1901#define PF_SUPERPRIV 0x00000100
1902#define PF_DUMPCORE 0x00000200
1903#define PF_SIGNALED 0x00000400
1904#define PF_MEMALLOC 0x00000800
1905#define PF_NPROC_EXCEEDED 0x00001000
1906#define PF_USED_MATH 0x00002000
1907#define PF_USED_ASYNC 0x00004000
1908#define PF_NOFREEZE 0x00008000
1909#define PF_FROZEN 0x00010000
1910#define PF_FSTRANS 0x00020000
1911#define PF_KSWAPD 0x00040000
1912#define PF_MEMALLOC_NOIO 0x00080000
1913#define PF_LESS_THROTTLE 0x00100000
1914#define PF_KTHREAD 0x00200000
1915#define PF_RANDOMIZE 0x00400000
1916#define PF_SWAPWRITE 0x00800000
1917#define PF_SPREAD_PAGE 0x01000000
1918#define PF_SPREAD_SLAB 0x02000000
1919#define PF_NO_SETAFFINITY 0x04000000
1920#define PF_MCE_EARLY 0x08000000
1921#define PF_MUTEX_TESTER 0x20000000
1922#define PF_FREEZER_SKIP 0x40000000
1923#define PF_SUSPEND_TASK 0x80000000
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1937#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1938#define clear_used_math() clear_stopped_child_used_math(current)
1939#define set_used_math() set_stopped_child_used_math(current)
1940#define conditional_stopped_child_used_math(condition, child) \
1941 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1942#define conditional_used_math(condition) \
1943 conditional_stopped_child_used_math(condition, current)
1944#define copy_to_stopped_child_used_math(child) \
1945 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1946
1947#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1948#define used_math() tsk_used_math(current)
1949
1950
1951static inline gfp_t memalloc_noio_flags(gfp_t flags)
1952{
1953 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1954 flags &= ~__GFP_IO;
1955 return flags;
1956}
1957
1958static inline unsigned int memalloc_noio_save(void)
1959{
1960 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
1961 current->flags |= PF_MEMALLOC_NOIO;
1962 return flags;
1963}
1964
1965static inline void memalloc_noio_restore(unsigned int flags)
1966{
1967 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
1968}
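
/*
 * Editorial usage sketch (not part of the original header): a driver that
 * must avoid recursing into the I/O path can bracket its allocations with
 * the helpers above; the page allocator then strips __GFP_IO via
 * memalloc_noio_flags().
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	buf = kmalloc(size, GFP_KERNEL);
 *	memalloc_noio_restore(noio_flags);
 */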
1969
1970
1971
1972
1973#define JOBCTL_STOP_SIGMASK 0xffff
1974
1975#define JOBCTL_STOP_DEQUEUED_BIT 16
1976#define JOBCTL_STOP_PENDING_BIT 17
1977#define JOBCTL_STOP_CONSUME_BIT 18
1978#define JOBCTL_TRAP_STOP_BIT 19
1979#define JOBCTL_TRAP_NOTIFY_BIT 20
1980#define JOBCTL_TRAPPING_BIT 21
1981#define JOBCTL_LISTENING_BIT 22
1982
1983#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
1984#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
1985#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
1986#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
1987#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
1988#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
1989#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
1990
1991#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1992#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1993
1994extern bool task_set_jobctl_pending(struct task_struct *task,
1995 unsigned int mask);
1996extern void task_clear_jobctl_trapping(struct task_struct *task);
1997extern void task_clear_jobctl_pending(struct task_struct *task,
1998 unsigned int mask);
1999
2000#ifdef CONFIG_PREEMPT_RCU
2001
2002#define RCU_READ_UNLOCK_BLOCKED (1 << 0)
2003#define RCU_READ_UNLOCK_NEED_QS (1 << 1)
2004
2005static inline void rcu_copy_process(struct task_struct *p)
2006{
2007 p->rcu_read_lock_nesting = 0;
2008 p->rcu_read_unlock_special = 0;
2009#ifdef CONFIG_TREE_PREEMPT_RCU
2010 p->rcu_blocked_node = NULL;
2011#endif
2012#ifdef CONFIG_RCU_BOOST
2013 p->rcu_boost_mutex = NULL;
2014#endif
2015 INIT_LIST_HEAD(&p->rcu_node_entry);
2016}
2017
2018#else
2019
2020static inline void rcu_copy_process(struct task_struct *p)
2021{
2022}
2023
2024#endif
2025
2026static inline void tsk_restore_flags(struct task_struct *task,
2027 unsigned long orig_flags, unsigned long flags)
2028{
2029 task->flags &= ~flags;
2030 task->flags |= orig_flags & flags;
2031}
2032
2033#ifdef CONFIG_SMP
2034extern void do_set_cpus_allowed(struct task_struct *p,
2035 const struct cpumask *new_mask);
2036
2037extern int set_cpus_allowed_ptr(struct task_struct *p,
2038 const struct cpumask *new_mask);
2039#else
2040static inline void do_set_cpus_allowed(struct task_struct *p,
2041 const struct cpumask *new_mask)
2042{
2043}
2044static inline int set_cpus_allowed_ptr(struct task_struct *p,
2045 const struct cpumask *new_mask)
2046{
2047 if (!cpumask_test_cpu(0, new_mask))
2048 return -EINVAL;
2049 return 0;
2050}
2051#endif
2052
2053#ifdef CONFIG_NO_HZ_COMMON
2054void calc_load_enter_idle(void);
2055void calc_load_exit_idle(void);
2056#else
2057static inline void calc_load_enter_idle(void) { }
2058static inline void calc_load_exit_idle(void) { }
2059#endif
2060
2061#ifndef CONFIG_CPUMASK_OFFSTACK
2062static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2063{
2064 return set_cpus_allowed_ptr(p, &new_mask);
2065}
2066#endif

/*
 * sched_clock() returns nanoseconds but makes no promise of monotonicity or
 * of bounded drift between CPUs; outside of architecture code, prefer the
 * cpu_clock()/local_clock()/sched_clock_cpu() interfaces declared below.
 */
2076extern unsigned long long notrace sched_clock(void);
2077
2078
2079
2080extern u64 cpu_clock(int cpu);
2081extern u64 local_clock(void);
2082extern u64 sched_clock_cpu(int cpu);
2083
2084
2085extern void sched_clock_init(void);
2086
2087#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2088static inline void sched_clock_tick(void)
2089{
2090}
2091
2092static inline void sched_clock_idle_sleep_event(void)
2093{
2094}
2095
2096static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2097{
2098}
2099#else
2100
2101
2102
2103
2104
2105
2106extern int sched_clock_stable(void);
2107extern void set_sched_clock_stable(void);
2108extern void clear_sched_clock_stable(void);
2109
2110extern void sched_clock_tick(void);
2111extern void sched_clock_idle_sleep_event(void);
2112extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2113#endif
2114
2115#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2116
2117
2118
2119
2120
2121extern void enable_sched_clock_irqtime(void);
2122extern void disable_sched_clock_irqtime(void);
2123#else
2124static inline void enable_sched_clock_irqtime(void) {}
2125static inline void disable_sched_clock_irqtime(void) {}
2126#endif
2127
2128extern unsigned long long
2129task_sched_runtime(struct task_struct *task);
2130
2131
2132#ifdef CONFIG_SMP
2133extern void sched_exec(void);
2134#else
2135#define sched_exec() {}
2136#endif
2137
2138extern void sched_clock_idle_sleep_event(void);
2139extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2140
2141#ifdef CONFIG_HOTPLUG_CPU
2142extern void idle_task_exit(void);
2143#else
2144static inline void idle_task_exit(void) {}
2145#endif
2146
2147#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2148extern void wake_up_nohz_cpu(int cpu);
2149#else
2150static inline void wake_up_nohz_cpu(int cpu) { }
2151#endif
2152
2153#ifdef CONFIG_NO_HZ_FULL
2154extern bool sched_can_stop_tick(void);
2155extern u64 scheduler_tick_max_deferment(void);
2156#else
2157static inline bool sched_can_stop_tick(void) { return false; }
2158#endif
2159
2160#ifdef CONFIG_SCHED_AUTOGROUP
2161extern void sched_autogroup_create_attach(struct task_struct *p);
2162extern void sched_autogroup_detach(struct task_struct *p);
2163extern void sched_autogroup_fork(struct signal_struct *sig);
2164extern void sched_autogroup_exit(struct signal_struct *sig);
2165#ifdef CONFIG_PROC_FS
2166extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2167extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2168#endif
2169#else
2170static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2171static inline void sched_autogroup_detach(struct task_struct *p) { }
2172static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2173static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2174#endif
2175
2176extern int yield_to(struct task_struct *p, bool preempt);
2177extern void set_user_nice(struct task_struct *p, long nice);
2178extern int task_prio(const struct task_struct *p);
2179
2180
2181
2182
2183
2184
2185static inline int task_nice(const struct task_struct *p)
2186{
2187 return PRIO_TO_NICE((p)->static_prio);
2188}
2189extern int can_nice(const struct task_struct *p, const int nice);
2190extern int task_curr(const struct task_struct *p);
2191extern int idle_cpu(int cpu);
2192extern int sched_setscheduler(struct task_struct *, int,
2193 const struct sched_param *);
2194extern int sched_setscheduler_nocheck(struct task_struct *, int,
2195 const struct sched_param *);
2196extern int sched_setattr(struct task_struct *,
2197 const struct sched_attr *);
2198extern struct task_struct *idle_task(int cpu);
2199
2200
2201
2202
2203
2204
2205static inline bool is_idle_task(const struct task_struct *p)
2206{
2207 return p->pid == 0;
2208}
2209extern struct task_struct *curr_task(int cpu);
2210extern void set_curr_task(int cpu, struct task_struct *p);
2211
2212void yield(void);
2213
2214
2215
2216
2217extern struct exec_domain default_exec_domain;
2218
2219union thread_union {
2220 struct thread_info thread_info;
2221 unsigned long stack[THREAD_SIZE/sizeof(long)];
2222};
2223
2224#ifndef __HAVE_ARCH_KSTACK_END
2225static inline int kstack_end(void *addr)
2226{
2227
2228
2229
2230 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2231}
2232#endif
2233
2234extern union thread_union init_thread_union;
2235extern struct task_struct init_task;
2236
2237extern struct mm_struct init_mm;
2238
2239extern struct pid_namespace init_pid_ns;
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252extern struct task_struct *find_task_by_vpid(pid_t nr);
2253extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2254 struct pid_namespace *ns);
2255
2256
2257extern struct user_struct * alloc_uid(kuid_t);
2258static inline struct user_struct *get_uid(struct user_struct *u)
2259{
2260 atomic_inc(&u->__count);
2261 return u;
2262}
2263extern void free_uid(struct user_struct *);
2264
2265#include <asm/current.h>
2266
2267extern void xtime_update(unsigned long ticks);
2268
2269extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2270extern int wake_up_process(struct task_struct *tsk);
2271extern void wake_up_new_task(struct task_struct *tsk);
2272#ifdef CONFIG_SMP
2273 extern void kick_process(struct task_struct *tsk);
2274#else
2275 static inline void kick_process(struct task_struct *tsk) { }
2276#endif
2277extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2278extern void sched_dead(struct task_struct *p);
2279
2280extern void proc_caches_init(void);
2281extern void flush_signals(struct task_struct *);
2282extern void __flush_signals(struct task_struct *);
2283extern void ignore_signals(struct task_struct *);
2284extern void flush_signal_handlers(struct task_struct *, int force_default);
2285extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2286
2287static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2288{
2289 unsigned long flags;
2290 int ret;
2291
2292 spin_lock_irqsave(&tsk->sighand->siglock, flags);
2293 ret = dequeue_signal(tsk, mask, info);
2294 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2295
2296 return ret;
2297}
2298
2299extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2300 sigset_t *mask);
2301extern void unblock_all_signals(void);
2302extern void release_task(struct task_struct * p);
2303extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2304extern int force_sigsegv(int, struct task_struct *);
2305extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2306extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2307extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2308extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2309 const struct cred *, u32);
2310extern int kill_pgrp(struct pid *pid, int sig, int priv);
2311extern int kill_pid(struct pid *pid, int sig, int priv);
2312extern int kill_proc_info(int, struct siginfo *, pid_t);
2313extern __must_check bool do_notify_parent(struct task_struct *, int);
2314extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2315extern void force_sig(int, struct task_struct *);
2316extern int send_sig(int, struct task_struct *, int);
2317extern int zap_other_threads(struct task_struct *p);
2318extern struct sigqueue *sigqueue_alloc(void);
2319extern void sigqueue_free(struct sigqueue *);
2320extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2321extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2322
static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}
2328
static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;

	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}
2336
2337static inline int kill_cad_pid(int sig, int priv)
2338{
2339 return kill_pid(cad_pid, sig, priv);
2340}
2341
2342
2343#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2344#define SEND_SIG_PRIV ((struct siginfo *) 1)
2345#define SEND_SIG_FORCED ((struct siginfo *) 2)
2346
2347
2348
2349
2350static inline int on_sig_stack(unsigned long sp)
2351{
2352#ifdef CONFIG_STACK_GROWSUP
2353 return sp >= current->sas_ss_sp &&
2354 sp - current->sas_ss_sp < current->sas_ss_size;
2355#else
2356 return sp > current->sas_ss_sp &&
2357 sp - current->sas_ss_sp <= current->sas_ss_size;
2358#endif
2359}
2360
2361static inline int sas_ss_flags(unsigned long sp)
2362{
2363 return (current->sas_ss_size == 0 ? SS_DISABLE
2364 : on_sig_stack(sp) ? SS_ONSTACK : 0);
2365}
2366
2367static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2368{
2369 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2370#ifdef CONFIG_STACK_GROWSUP
2371 return current->sas_ss_sp;
2372#else
2373 return current->sas_ss_sp + current->sas_ss_size;
2374#endif
2375 return sp;
2376}
2377
2378
2379
2380
2381extern struct mm_struct * mm_alloc(void);
2382
2383
2384extern void __mmdrop(struct mm_struct *);
2385static inline void mmdrop(struct mm_struct * mm)
2386{
2387 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2388 __mmdrop(mm);
2389}
2390
2391
2392extern void mmput(struct mm_struct *);
2393
2394extern struct mm_struct *get_task_mm(struct task_struct *task);
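
/*
 * Editorial usage sketch (not part of the original header): the usual way to
 * look at another task's address space is to take a reference on its mm and
 * drop it with mmput() when done; mmdrop() above pairs with the lower-level
 * mm_count reference instead.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		...
 *		mmput(mm);
 *	}
 */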
2395
2396
2397
2398
2399
2400extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2401
2402extern void mm_release(struct task_struct *, struct mm_struct *);
2403
2404extern int copy_thread(unsigned long, unsigned long, unsigned long,
2405 struct task_struct *);
2406extern void flush_thread(void);
2407extern void exit_thread(void);
2408
2409extern void exit_files(struct task_struct *);
2410extern void __cleanup_sighand(struct sighand_struct *);
2411
2412extern void exit_itimers(struct signal_struct *);
2413extern void flush_itimer_signals(void);
2414
2415extern void do_group_exit(int);
2416
2417extern int do_execve(struct filename *,
2418 const char __user * const __user *,
2419 const char __user * const __user *);
2420extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2421struct task_struct *fork_idle(int);
2422extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2423
2424extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2425static inline void set_task_comm(struct task_struct *tsk, const char *from)
2426{
2427 __set_task_comm(tsk, from, false);
2428}
2429extern char *get_task_comm(char *to, struct task_struct *tsk);
2430
2431#ifdef CONFIG_SMP
2432void scheduler_ipi(void);
2433extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2434#else
2435static inline void scheduler_ipi(void) { }
2436static inline unsigned long wait_task_inactive(struct task_struct *p,
2437 long match_state)
2438{
2439 return 1;
2440}
2441#endif
2442
2443#define next_task(p) \
2444 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2445
2446#define for_each_process(p) \
2447 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2448
2449extern bool current_is_single_threaded(void);
2450
2451
2452
2453
2454
2455#define do_each_thread(g, t) \
2456 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2457
2458#define while_each_thread(g, t) \
2459 while ((t = next_thread(t)) != g)
2460
2461#define __for_each_thread(signal, t) \
2462 list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2463
2464#define for_each_thread(p, t) \
2465 __for_each_thread((p)->signal, t)
2466
2467
2468#define for_each_process_thread(p, t) \
2469 for_each_process(p) for_each_thread(p, t)
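
/*
 * Editorial usage sketch (not part of the original header): walking the task
 * list with these iterators requires holding either tasklist_lock or the RCU
 * read lock, e.g.:
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	for_each_process(p)
 *		pr_info("%s [%d]\n", p->comm, task_pid_nr(p));
 *	rcu_read_unlock();
 */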
2470
2471static inline int get_nr_threads(struct task_struct *tsk)
2472{
2473 return tsk->signal->nr_threads;
2474}
2475
2476static inline bool thread_group_leader(struct task_struct *p)
2477{
2478 return p->exit_signal >= 0;
2479}
2480
2481
2482
2483
2484
2485
2486
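/*
 * Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */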
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
	(thread_group_leader(p) && !thread_group_empty(p))

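/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */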
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

#ifdef CONFIG_CGROUPS
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}

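/**
 * threadgroup_lock - lock threadgroup
 * @tsk: member task of the threadgroup to lock
 *
 * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
 * change ->group_leader/pid.  This is useful for cases where the threadgroup
 * needs to stay stable across blockable operations.
 *
 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
 * synchronization.
 */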
static inline void threadgroup_lock(struct task_struct *tsk)
{
	down_write(&tsk->signal->group_rwsem);
}

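/**
 * threadgroup_unlock - unlock threadgroup
 * @tsk: member task of the threadgroup to unlock
 *
 * Reverse threadgroup_lock().
 */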
static inline void threadgroup_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->group_rwsem);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
static inline void threadgroup_change_end(struct task_struct *tsk) {}
static inline void threadgroup_lock(struct task_struct *tsk) {}
static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

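/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */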
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

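/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */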
extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

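/*
 * Does a critical section need to be broken due to another
 * task waiting?  (Technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency.)
 */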
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

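/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */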
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

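	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * pairs with the barrier on the resched path.
	 */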
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

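	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * pairs with the barrier on the resched path.
	 */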
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

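	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit,
	 * so a reschedule request raised in the window is not lost; once
	 * the bit is cleared we will be getting IPIs for new
	 * TIF_NEED_RESCHED instead.
	 */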
	smp_mb();

	preempt_fold_need_resched();
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

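/*
 * Thread group CPU time accounting.
 */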
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	raw_spin_lock_init(&sig->cputimer.lock);
}

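/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */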
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

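/*
 * Wrappers for p->thread_info->cpu access.  No-op on UP.
 */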
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

#endif