1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5
6#include <linux/sched/prio.h>
7
8
9struct sched_param {
10 int sched_priority;
11};
12
13#include <asm/param.h>
14
15#include <linux/capability.h>
16#include <linux/threads.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/timex.h>
20#include <linux/jiffies.h>
21#include <linux/plist.h>
22#include <linux/rbtree.h>
23#include <linux/thread_info.h>
24#include <linux/cpumask.h>
25#include <linux/errno.h>
26#include <linux/nodemask.h>
27#include <linux/mm_types.h>
28#include <linux/preempt_mask.h>
29
30#include <asm/page.h>
31#include <asm/ptrace.h>
32#include <linux/cputime.h>
33
34#include <linux/smp.h>
35#include <linux/sem.h>
36#include <linux/shm.h>
37#include <linux/signal.h>
38#include <linux/compiler.h>
39#include <linux/completion.h>
40#include <linux/pid.h>
41#include <linux/percpu.h>
42#include <linux/topology.h>
43#include <linux/proportions.h>
44#include <linux/seccomp.h>
45#include <linux/rcupdate.h>
46#include <linux/rculist.h>
47#include <linux/rtmutex.h>
48
49#include <linux/time.h>
50#include <linux/param.h>
51#include <linux/resource.h>
52#include <linux/timer.h>
53#include <linux/hrtimer.h>
54#include <linux/task_io_accounting.h>
55#include <linux/latencytop.h>
56#include <linux/cred.h>
57#include <linux/llist.h>
58#include <linux/uidgid.h>
59#include <linux/gfp.h>
60#include <linux/magic.h>
61
62#include <asm/processor.h>
63
64#define SCHED_ATTR_SIZE_VER0 48
65
/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param can not be
 * altered without introducing ABI issues with legacy applications
 * (e.g. in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant at describing a so-called
 * sporadic time-constrained task. In such a model a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the activation time) of each instance.
 *
 * @size		size of the structure, for fwd/bwd compat.
 *
 * @sched_policy	task's scheduling policy
 * @sched_flags		for customizing the scheduler behaviour
 * @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 * @sched_priority	task's static priority (SCHED_FIFO/RR)
 * @sched_runtime	representative of the task's runtime
 * @sched_deadline	representative of the task's deadline
 * @sched_period	representative of the task's period
 *
 * Given this task model, there is a multiplicity of scheduling algorithms
 * and policies that can be used to ensure all the tasks will make their
 * timing constraints.  As of now, the SCHED_DEADLINE policy (sched_dl
 * scheduling class) is the only user of this data structure.
 */
110struct sched_attr {
111 u32 size;
112
113 u32 sched_policy;
114 u64 sched_flags;
115
116
117 s32 sched_nice;
118
119
120 u32 sched_priority;
121
122
123 u64 sched_runtime;
124 u64 sched_deadline;
125 u64 sched_period;
126};
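/*
 * Illustrative sketch (not part of the original header): filling in a
 * struct sched_attr for a deadline task and applying it with
 * sched_setattr(), which is declared further below in this file.  The
 * 10ms/100ms runtime/deadline/period values are arbitrary example numbers.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	// 10ms, in ns
 *		.sched_deadline	= 100 * 1000 * 1000,	// 100ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms
 *	};
 *	int ret = sched_setattr(p, &attr);
 */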
127
128struct exec_domain;
129struct futex_pi_state;
130struct robust_list_head;
131struct bio_list;
132struct fs_struct;
133struct perf_event_context;
134struct blk_plug;
135struct filename;
136
137#define VMACACHE_BITS 2
138#define VMACACHE_SIZE (1U << VMACACHE_BITS)
139#define VMACACHE_MASK (VMACACHE_SIZE - 1)
140
/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
151extern unsigned long avenrun[];
152extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
153
154#define FSHIFT 11
155#define FIXED_1 (1<<FSHIFT)
156#define LOAD_FREQ (5*HZ+1)
157#define EXP_1 1884
158#define EXP_5 2014
159#define EXP_15 2037
160
161#define CALC_LOAD(load,exp,n) \
162 load *= exp; \
163 load += n*(FIXED_1-exp); \
164 load >>= FSHIFT;
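/*
 * Illustrative sketch (not from the original header): how the fixed-point
 * load average is typically updated and decoded.  LOAD_INT()/LOAD_FRAC()
 * are assumed helpers (fs/proc/loadavg.c defines equivalents); the exact
 * source of "active" differs in the real code (it also counts
 * uninterruptible tasks).
 *
 *	unsigned long active = nr_running();		// example input only
 *	CALC_LOAD(avenrun[0], EXP_1, active * FIXED_1);
 *
 *	#define LOAD_INT(x)  ((x) >> FSHIFT)
 *	#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
 */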
165
166extern unsigned long total_forks;
167extern int nr_threads;
168DECLARE_PER_CPU(unsigned long, process_counts);
169extern int nr_processes(void);
170extern unsigned long nr_running(void);
171extern bool single_task_running(void);
172extern unsigned long nr_iowait(void);
173extern unsigned long nr_iowait_cpu(int cpu);
174extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
175
176extern void calc_global_load(unsigned long ticks);
177extern void update_cpu_load_nohz(void);
178
179extern unsigned long get_parent_ip(unsigned long addr);
180
181extern void dump_cpu_task(int cpu);
182
183struct seq_file;
184struct cfs_rq;
185struct task_group;
186#ifdef CONFIG_SCHED_DEBUG
187extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
188extern void proc_sched_set_task(struct task_struct *p);
189extern void
190print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
191#endif
192
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
203#define TASK_RUNNING 0
204#define TASK_INTERRUPTIBLE 1
205#define TASK_UNINTERRUPTIBLE 2
206#define __TASK_STOPPED 4
207#define __TASK_TRACED 8
208
209#define EXIT_DEAD 16
210#define EXIT_ZOMBIE 32
211#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
212
213#define TASK_DEAD 64
214#define TASK_WAKEKILL 128
215#define TASK_WAKING 256
216#define TASK_PARKED 512
217#define TASK_STATE_MAX 1024
218
219#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
220
221extern char ___assert_task_state[1 - 2*!!(
222 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
223
224
225#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
226#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
227#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
228
229
230#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
231#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
232
233
234#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
235 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
236 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
237
238#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
239#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
240#define task_is_stopped_or_traced(task) \
241 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
242#define task_contributes_to_load(task) \
243 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
244 (task->flags & PF_FROZEN) == 0)
245
246#define __set_task_state(tsk, state_value) \
247 do { (tsk)->state = (state_value); } while (0)
248#define set_task_state(tsk, state_value) \
249 set_mb((tsk)->state, (state_value))
250
/*
 * set_task_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule();
 *
 * If the caller does not need such serialisation then use
 * __set_current_state().
 */
262#define __set_current_state(state_value) \
263 do { current->state = (state_value); } while (0)
264#define set_current_state(state_value) \
265 set_mb(current->state, (state_value))
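/*
 * Illustrative sketch (not from the original header): the canonical
 * sleep/wake pattern built on set_current_state().  "condition" and the
 * wakeup side are placeholders.
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */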
266
267
268#define TASK_COMM_LEN 16
269
270#include <linux/spinlock.h>
/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
278extern rwlock_t tasklist_lock;
279extern spinlock_t mmlist_lock;
280
281struct task_struct;
282
283#ifdef CONFIG_PROVE_RCU
284extern int lockdep_tasklist_lock_is_held(void);
285#endif
286
287extern void sched_init(void);
288extern void sched_init_smp(void);
289extern asmlinkage void schedule_tail(struct task_struct *prev);
290extern void init_idle(struct task_struct *idle, int cpu);
291extern void init_idle_bootup_task(struct task_struct *idle);
292
293extern int runqueue_is_locked(int cpu);
294
295#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
296extern void nohz_balance_enter_idle(int cpu);
297extern void set_cpu_sd_state_idle(void);
298extern int get_nohz_timer_target(int pinned);
299#else
300static inline void nohz_balance_enter_idle(int cpu) { }
301static inline void set_cpu_sd_state_idle(void) { }
302static inline int get_nohz_timer_target(int pinned)
303{
304 return smp_processor_id();
305}
306#endif
307
308
309
310
311extern void show_state_filter(unsigned long state_filter);
312
313static inline void show_state(void)
314{
315 show_state_filter(0);
316}
317
318extern void show_regs(struct pt_regs *);
319
/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL
 * for the current task), SP is the stack pointer of the first frame that
 * should be shown in the back trace (or NULL if the entire call-chain of
 * the task should be shown).
 */
325extern void show_stack(struct task_struct *task, unsigned long *sp);
326
327void io_schedule(void);
328long io_schedule_timeout(long timeout);
329
330extern void cpu_init (void);
331extern void trap_init(void);
332extern void update_process_times(int user);
333extern void scheduler_tick(void);
334
335extern void sched_show_task(struct task_struct *p);
336
337#ifdef CONFIG_LOCKUP_DETECTOR
338extern void touch_softlockup_watchdog(void);
339extern void touch_softlockup_watchdog_sync(void);
340extern void touch_all_softlockup_watchdogs(void);
341extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
342 void __user *buffer,
343 size_t *lenp, loff_t *ppos);
344extern unsigned int softlockup_panic;
345void lockup_detector_init(void);
346#else
347static inline void touch_softlockup_watchdog(void)
348{
349}
350static inline void touch_softlockup_watchdog_sync(void)
351{
352}
353static inline void touch_all_softlockup_watchdogs(void)
354{
355}
356static inline void lockup_detector_init(void)
357{
358}
359#endif
360
361#ifdef CONFIG_DETECT_HUNG_TASK
362void reset_hung_task_detector(void);
363#else
364static inline void reset_hung_task_detector(void)
365{
366}
367#endif
368
369
370#define __sched __attribute__((__section__(".sched.text")))
371
372
373extern char __sched_text_start[], __sched_text_end[];
374
375
376extern int in_sched_functions(unsigned long addr);
377
378#define MAX_SCHEDULE_TIMEOUT LONG_MAX
379extern signed long schedule_timeout(signed long timeout);
380extern signed long schedule_timeout_interruptible(signed long timeout);
381extern signed long schedule_timeout_killable(signed long timeout);
382extern signed long schedule_timeout_uninterruptible(signed long timeout);
383asmlinkage void schedule(void);
384extern void schedule_preempt_disabled(void);
385
386struct nsproxy;
387struct user_namespace;
388
389#ifdef CONFIG_MMU
390extern void arch_pick_mmap_layout(struct mm_struct *mm);
391extern unsigned long
392arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
393 unsigned long, unsigned long);
394extern unsigned long
395arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
396 unsigned long len, unsigned long pgoff,
397 unsigned long flags);
398#else
399static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
400#endif
401
402#define SUID_DUMP_DISABLE 0
403#define SUID_DUMP_USER 1
404#define SUID_DUMP_ROOT 2
405
406
407
408
409#define MMF_DUMPABLE_BITS 2
410#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
411
412extern void set_dumpable(struct mm_struct *mm, int value);
413
414
415
416
417
418
419static inline int __get_dumpable(unsigned long mm_flags)
420{
421 return mm_flags & MMF_DUMPABLE_MASK;
422}
423
424static inline int get_dumpable(struct mm_struct *mm)
425{
426 return __get_dumpable(mm->flags);
427}
428
429
430#define MMF_DUMP_ANON_PRIVATE 2
431#define MMF_DUMP_ANON_SHARED 3
432#define MMF_DUMP_MAPPED_PRIVATE 4
433#define MMF_DUMP_MAPPED_SHARED 5
434#define MMF_DUMP_ELF_HEADERS 6
435#define MMF_DUMP_HUGETLB_PRIVATE 7
436#define MMF_DUMP_HUGETLB_SHARED 8
437
438#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
439#define MMF_DUMP_FILTER_BITS 7
440#define MMF_DUMP_FILTER_MASK \
441 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
442#define MMF_DUMP_FILTER_DEFAULT \
443 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
444 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
445
446#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
447# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
448#else
449# define MMF_DUMP_MASK_DEFAULT_ELF 0
450#endif
451
452#define MMF_VM_MERGEABLE 16
453#define MMF_VM_HUGEPAGE 17
454#define MMF_EXE_FILE_CHANGED 18
455
456#define MMF_HAS_UPROBES 19
457#define MMF_RECALC_UPROBES 20
458
459#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
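/*
 * Illustrative sketch (not from the original header): the MMF_* values are
 * bit numbers in mm->flags and are normally manipulated with the atomic
 * bitops, e.g.:
 *
 *	set_bit(MMF_VM_MERGEABLE, &mm->flags);
 *	if (test_bit(MMF_DUMP_ANON_SHARED, &mm->flags))
 *		...	// include shared anonymous memory in core dumps
 */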
460
461struct sighand_struct {
462 atomic_t count;
463 struct k_sigaction action[_NSIG];
464 spinlock_t siglock;
465 wait_queue_head_t signalfd_wqh;
466};
467
468struct pacct_struct {
469 int ac_flag;
470 long ac_exitcode;
471 unsigned long ac_mem;
472 cputime_t ac_utime, ac_stime;
473 unsigned long ac_minflt, ac_majflt;
474};
475
476struct cpu_itimer {
477 cputime_t expires;
478 cputime_t incr;
479 u32 error;
480 u32 incr_error;
481};
482
/**
 * struct cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
490struct cputime {
491 cputime_t utime;
492 cputime_t stime;
493};
494
/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked
 * for threads and thread groups.  Most things considering CPU time want to
 * group these counts together and treat all three of them in parallel.
 */
509struct task_cputime {
510 cputime_t utime;
511 cputime_t stime;
512 unsigned long long sum_exec_runtime;
513};
514
515#define prof_exp stime
516#define virt_exp utime
517#define sched_exp sum_exec_runtime
518
519#define INIT_CPUTIME \
520 (struct task_cputime) { \
521 .utime = 0, \
522 .stime = 0, \
523 .sum_exec_runtime = 0, \
524 }
525
526#ifdef CONFIG_PREEMPT_COUNT
527#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
528#else
529#define PREEMPT_DISABLED PREEMPT_ENABLED
530#endif
531
/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
539#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE)
540
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 *			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
551struct thread_group_cputimer {
552 struct task_cputime cputime;
553 int running;
554 raw_spinlock_t lock;
555};
556
557#include <linux/rwsem.h>
558struct autogroup;
559
/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
567struct signal_struct {
568 atomic_t sigcnt;
569 atomic_t live;
570 int nr_threads;
571 struct list_head thread_head;
572
573 wait_queue_head_t wait_chldexit;
574
575
576 struct task_struct *curr_target;
577
578
579 struct sigpending shared_pending;
580
581
582 int group_exit_code;
583
584
585
586
587
588 int notify_count;
589 struct task_struct *group_exit_task;
590
591
592 int group_stop_count;
593 unsigned int flags;
	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
604 unsigned int is_child_subreaper:1;
605 unsigned int has_child_subreaper:1;
606
607
608 int posix_timer_id;
609 struct list_head posix_timers;
610
611
612 struct hrtimer real_timer;
613 struct pid *leader_pid;
614 ktime_t it_real_incr;
615
616
617
618
619
620
621 struct cpu_itimer it[2];
622
623
624
625
626
627 struct thread_group_cputimer cputimer;
628
629
630 struct task_cputime cputime_expires;
631
632 struct list_head cpu_timers[3];
633
634 struct pid *tty_old_pgrp;
635
636
637 int leader;
638
639 struct tty_struct *tty;
640
641#ifdef CONFIG_SCHED_AUTOGROUP
642 struct autogroup *autogroup;
643#endif
644
645
646
647
648
649
650 seqlock_t stats_lock;
651 cputime_t utime, stime, cutime, cstime;
652 cputime_t gtime;
653 cputime_t cgtime;
654#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
655 struct cputime prev_cputime;
656#endif
657 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
658 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
659 unsigned long inblock, oublock, cinblock, coublock;
660 unsigned long maxrss, cmaxrss;
661 struct task_io_accounting ioac;
662
663
664
665
666
667
668
669 unsigned long long sum_sched_runtime;
670
671
672
673
674
675
676
677
678
679
680 struct rlimit rlim[RLIM_NLIMITS];
681
682#ifdef CONFIG_BSD_PROCESS_ACCT
683 struct pacct_struct pacct;
684#endif
685#ifdef CONFIG_TASKSTATS
686 struct taskstats *stats;
687#endif
688#ifdef CONFIG_AUDIT
689 unsigned audit_tty;
690 unsigned audit_tty_log_passwd;
691 struct tty_audit_buf *tty_audit_buf;
692#endif
693#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, more specifically, setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require the
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of the exec path.  Currently, cgroup is
	 * the only user.
	 */
703 struct rw_semaphore group_rwsem;
704#endif
705
706 oom_flags_t oom_flags;
707 short oom_score_adj;
708 short oom_score_adj_min;
709
710
711 struct mutex cred_guard_mutex;
712
713
714};
715
/*
 * Bits in flags field of signal_struct.
 */
719#define SIGNAL_STOP_STOPPED 0x00000001
720#define SIGNAL_STOP_CONTINUED 0x00000002
721#define SIGNAL_GROUP_EXIT 0x00000004
722#define SIGNAL_GROUP_COREDUMP 0x00000008
723
724
725
726#define SIGNAL_CLD_STOPPED 0x00000010
727#define SIGNAL_CLD_CONTINUED 0x00000020
728#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
729
730#define SIGNAL_UNKILLABLE 0x00000040
731
732
733static inline int signal_group_exit(const struct signal_struct *sig)
734{
735 return (sig->flags & SIGNAL_GROUP_EXIT) ||
736 (sig->group_exit_task != NULL);
737}
738
739
740
741
742struct user_struct {
743 atomic_t __count;
744 atomic_t processes;
745 atomic_t sigpending;
746#ifdef CONFIG_INOTIFY_USER
747 atomic_t inotify_watches;
748 atomic_t inotify_devs;
749#endif
750#ifdef CONFIG_FANOTIFY
751 atomic_t fanotify_listeners;
752#endif
753#ifdef CONFIG_EPOLL
754 atomic_long_t epoll_watches;
755#endif
756#ifdef CONFIG_POSIX_MQUEUE
757
758 unsigned long mq_bytes;
759#endif
760 unsigned long locked_shm;
761
762#ifdef CONFIG_KEYS
763 struct key *uid_keyring;
764 struct key *session_keyring;
765#endif
766
767
768 struct hlist_node uidhash_node;
769 kuid_t uid;
770
771#ifdef CONFIG_PERF_EVENTS
772 atomic_long_t locked_vm;
773#endif
774};
775
776extern int uids_sysfs_init(void);
777
778extern struct user_struct *find_user(kuid_t);
779
780extern struct user_struct root_user;
781#define INIT_USER (&root_user)
782
783
784struct backing_dev_info;
785struct reclaim_state;
786
787#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
788struct sched_info {
789
790 unsigned long pcount;
791 unsigned long long run_delay;
792
793
794 unsigned long long last_arrival,
795 last_queued;
796};
797#endif
798
799#ifdef CONFIG_TASK_DELAY_ACCT
800struct task_delay_info {
801 spinlock_t lock;
802 unsigned int flags;
	/* For each statistic XXX below, the pattern is:
	 *
	 *	XXX_count is incremented on every XXX operation, and the
	 *	delay associated with the operation is added to XXX_delay;
	 *	XXX_delay contains the accumulated delay time in nanoseconds.
	 *
	 * Atomicity of updates to XXX_delay and XXX_count is protected by
	 * the single lock above (split into XXX_lock if contention becomes
	 * an issue).
	 */
819 u64 blkio_start;
820 u64 blkio_delay;
821 u64 swapin_delay;
822 u32 blkio_count;
823
824 u32 swapin_count;
825
826
827 u64 freepages_start;
828 u64 freepages_delay;
829 u32 freepages_count;
830};
831#endif
832
833static inline int sched_info_on(void)
834{
835#ifdef CONFIG_SCHEDSTATS
836 return 1;
837#elif defined(CONFIG_TASK_DELAY_ACCT)
838 extern int delayacct_on;
839 return delayacct_on;
840#else
841 return 0;
842#endif
843}
844
845enum cpu_idle_type {
846 CPU_IDLE,
847 CPU_NOT_IDLE,
848 CPU_NEWLY_IDLE,
849 CPU_MAX_IDLE_TYPES
850};
851
852
853
854
855#define SCHED_CAPACITY_SHIFT 10
856#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
857
858
859
860
861#ifdef CONFIG_SMP
862#define SD_LOAD_BALANCE 0x0001
863#define SD_BALANCE_NEWIDLE 0x0002
864#define SD_BALANCE_EXEC 0x0004
865#define SD_BALANCE_FORK 0x0008
866#define SD_BALANCE_WAKE 0x0010
867#define SD_WAKE_AFFINE 0x0020
868#define SD_SHARE_CPUCAPACITY 0x0080
869#define SD_SHARE_POWERDOMAIN 0x0100
870#define SD_SHARE_PKG_RESOURCES 0x0200
871#define SD_SERIALIZE 0x0400
872#define SD_ASYM_PACKING 0x0800
873#define SD_PREFER_SIBLING 0x1000
874#define SD_OVERLAP 0x2000
875#define SD_NUMA 0x4000
876
877#ifdef CONFIG_SCHED_SMT
878static inline int cpu_smt_flags(void)
879{
880 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
881}
882#endif
883
884#ifdef CONFIG_SCHED_MC
885static inline int cpu_core_flags(void)
886{
887 return SD_SHARE_PKG_RESOURCES;
888}
889#endif
890
891#ifdef CONFIG_NUMA
892static inline int cpu_numa_flags(void)
893{
894 return SD_NUMA;
895}
896#endif
897
898struct sched_domain_attr {
899 int relax_domain_level;
900};
901
902#define SD_ATTR_INIT (struct sched_domain_attr) { \
903 .relax_domain_level = -1, \
904}
905
906extern int sched_domain_level_max;
907
908struct sched_group;
909
910struct sched_domain {
911
912 struct sched_domain *parent;
913 struct sched_domain *child;
914 struct sched_group *groups;
915 unsigned long min_interval;
916 unsigned long max_interval;
917 unsigned int busy_factor;
918 unsigned int imbalance_pct;
919 unsigned int cache_nice_tries;
920 unsigned int busy_idx;
921 unsigned int idle_idx;
922 unsigned int newidle_idx;
923 unsigned int wake_idx;
924 unsigned int forkexec_idx;
925 unsigned int smt_gain;
926
927 int nohz_idle;
928 int flags;
929 int level;
930
931
932 unsigned long last_balance;
933 unsigned int balance_interval;
934 unsigned int nr_balance_failed;
935
936
937 u64 max_newidle_lb_cost;
938 unsigned long next_decay_max_lb_cost;
939
940#ifdef CONFIG_SCHEDSTATS
941
942 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
943 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
944 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
945 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
946 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
947 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
948 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
949 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
950
951
952 unsigned int alb_count;
953 unsigned int alb_failed;
954 unsigned int alb_pushed;
955
956
957 unsigned int sbe_count;
958 unsigned int sbe_balanced;
959 unsigned int sbe_pushed;
960
961
962 unsigned int sbf_count;
963 unsigned int sbf_balanced;
964 unsigned int sbf_pushed;
965
966
967 unsigned int ttwu_wake_remote;
968 unsigned int ttwu_move_affine;
969 unsigned int ttwu_move_balance;
970#endif
971#ifdef CONFIG_SCHED_DEBUG
972 char *name;
973#endif
974 union {
975 void *private;
976 struct rcu_head rcu;
977 };
978
979 unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with.)
	 */
987 unsigned long span[0];
988};
989
990static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
991{
992 return to_cpumask(sd->span);
993}
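/*
 * Illustrative sketch (not from the original header): walking the CPUs
 * covered by a domain, given a struct sched_domain *sd looked up under
 * rcu_read_lock():
 *
 *	int i;
 *
 *	for_each_cpu(i, sched_domain_span(sd))
 *		...			// per-CPU work within the domain
 */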
994
995extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
996 struct sched_domain_attr *dattr_new);
997
998
999cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1000void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1001
1002bool cpus_share_cache(int this_cpu, int that_cpu);
1003
1004typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1005typedef int (*sched_domain_flags_f)(void);
1006
1007#define SDTL_OVERLAP 0x01
1008
1009struct sd_data {
1010 struct sched_domain **__percpu sd;
1011 struct sched_group **__percpu sg;
1012 struct sched_group_capacity **__percpu sgc;
1013};
1014
1015struct sched_domain_topology_level {
1016 sched_domain_mask_f mask;
1017 sched_domain_flags_f sd_flags;
1018 int flags;
1019 int numa_level;
1020 struct sd_data data;
1021#ifdef CONFIG_SCHED_DEBUG
1022 char *name;
1023#endif
1024};
1025
1026extern struct sched_domain_topology_level *sched_domain_topology;
1027
1028extern void set_sched_topology(struct sched_domain_topology_level *tl);
1029extern void wake_up_if_idle(int cpu);
1030
1031#ifdef CONFIG_SCHED_DEBUG
1032# define SD_INIT_NAME(type) .name = #type
1033#else
1034# define SD_INIT_NAME(type)
1035#endif
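/*
 * Illustrative sketch (not from the original header): an architecture can
 * override the default topology table along these lines.  The table name
 * is only an example; cpu_smt_mask()/cpu_coregroup_mask()/cpu_cpu_mask()
 * come from <linux/topology.h> and arch code.
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */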
1036
1037#else
1038
1039struct sched_domain_attr;
1040
1041static inline void
1042partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1043 struct sched_domain_attr *dattr_new)
1044{
1045}
1046
1047static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1048{
1049 return true;
1050}
1051
1052#endif
1053
1054
1055struct io_context;
1056
1057
1058#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1059extern void prefetch_stack(struct task_struct *t);
1060#else
1061static inline void prefetch_stack(struct task_struct *t) { }
1062#endif
1063
1064struct audit_context;
1065struct mempolicy;
1066struct pipe_inode_info;
1067struct uts_namespace;
1068
1069struct load_weight {
1070 unsigned long weight;
1071 u32 inv_weight;
1072};
1073
1074struct sched_avg {
	/*
	 * These sums represent an infinite geometric series and so are
	 * bound above by 1024/(1-y).  Thus we only need a u32 to store
	 * them for all durations when runnable and running are guaranteed
	 * to be comparable.
	 */
1080 u32 runnable_avg_sum, runnable_avg_period;
1081 u64 last_runnable_update;
1082 s64 decay_count;
1083 unsigned long load_avg_contrib;
1084};
1085
1086#ifdef CONFIG_SCHEDSTATS
1087struct sched_statistics {
1088 u64 wait_start;
1089 u64 wait_max;
1090 u64 wait_count;
1091 u64 wait_sum;
1092 u64 iowait_count;
1093 u64 iowait_sum;
1094
1095 u64 sleep_start;
1096 u64 sleep_max;
1097 s64 sum_sleep_runtime;
1098
1099 u64 block_start;
1100 u64 block_max;
1101 u64 exec_max;
1102 u64 slice_max;
1103
1104 u64 nr_migrations_cold;
1105 u64 nr_failed_migrations_affine;
1106 u64 nr_failed_migrations_running;
1107 u64 nr_failed_migrations_hot;
1108 u64 nr_forced_migrations;
1109
1110 u64 nr_wakeups;
1111 u64 nr_wakeups_sync;
1112 u64 nr_wakeups_migrate;
1113 u64 nr_wakeups_local;
1114 u64 nr_wakeups_remote;
1115 u64 nr_wakeups_affine;
1116 u64 nr_wakeups_affine_attempts;
1117 u64 nr_wakeups_passive;
1118 u64 nr_wakeups_idle;
1119};
1120#endif
1121
1122struct sched_entity {
1123 struct load_weight load;
1124 struct rb_node run_node;
1125 struct list_head group_node;
1126 unsigned int on_rq;
1127
1128 u64 exec_start;
1129 u64 sum_exec_runtime;
1130 u64 vruntime;
1131 u64 prev_sum_exec_runtime;
1132
1133 u64 nr_migrations;
1134
1135#ifdef CONFIG_SCHEDSTATS
1136 struct sched_statistics statistics;
1137#endif
1138
1139#ifdef CONFIG_FAIR_GROUP_SCHED
1140 int depth;
1141 struct sched_entity *parent;
1142
1143 struct cfs_rq *cfs_rq;
1144
1145 struct cfs_rq *my_q;
1146#endif
1147
1148#ifdef CONFIG_SMP
1149
1150 struct sched_avg avg;
1151#endif
1152};
1153
1154struct sched_rt_entity {
1155 struct list_head run_list;
1156 unsigned long timeout;
1157 unsigned long watchdog_stamp;
1158 unsigned int time_slice;
1159
1160 struct sched_rt_entity *back;
1161#ifdef CONFIG_RT_GROUP_SCHED
1162 struct sched_rt_entity *parent;
1163
1164 struct rt_rq *rt_rq;
1165
1166 struct rt_rq *my_q;
1167#endif
1168};
1169
1170struct sched_dl_entity {
1171 struct rb_node rb_node;
	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(); they will remain the same until
	 * the next sched_setattr().
	 */
1178 u64 dl_runtime;
1179 u64 dl_deadline;
1180 u64 dl_period;
1181 u64 dl_bw;
1182
	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
1188 s64 runtime;
1189 u64 deadline;
1190 unsigned int flags;
1191
	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_new tells if a new instance arrived. If so we must
	 * start executing it with full runtime and reset its absolute
	 * deadline.
	 *
	 * @dl_boosted tells if we are boosted due to deadline inheritance.
	 * If so we are outside the bandwidth enforcement mechanism (but
	 * only until we exit the critical section).
	 *
	 * @dl_yielded tells if the task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
1210 int dl_throttled, dl_new, dl_boosted, dl_yielded;
	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
1216 struct hrtimer dl_timer;
1217};
1218
1219union rcu_special {
1220 struct {
1221 bool blocked;
1222 bool need_qs;
1223 } b;
1224 short s;
1225};
1226struct rcu_node;
1227
1228enum perf_event_task_context {
1229 perf_invalid_context = -1,
1230 perf_hw_context = 0,
1231 perf_sw_context,
1232 perf_nr_task_contexts,
1233};
1234
1235struct task_struct {
1236 volatile long state;
1237 void *stack;
1238 atomic_t usage;
1239 unsigned int flags;
1240 unsigned int ptrace;
1241
1242#ifdef CONFIG_SMP
1243 struct llist_node wake_entry;
1244 int on_cpu;
1245 struct task_struct *last_wakee;
1246 unsigned long wakee_flips;
1247 unsigned long wakee_flip_decay_ts;
1248
1249 int wake_cpu;
1250#endif
1251 int on_rq;
1252
1253 int prio, static_prio, normal_prio;
1254 unsigned int rt_priority;
1255 const struct sched_class *sched_class;
1256 struct sched_entity se;
1257 struct sched_rt_entity rt;
1258#ifdef CONFIG_CGROUP_SCHED
1259 struct task_group *sched_task_group;
1260#endif
1261 struct sched_dl_entity dl;
1262
1263#ifdef CONFIG_PREEMPT_NOTIFIERS
1264
1265 struct hlist_head preempt_notifiers;
1266#endif
1267
1268#ifdef CONFIG_BLK_DEV_IO_TRACE
1269 unsigned int btrace_seq;
1270#endif
1271
1272 unsigned int policy;
1273 int nr_cpus_allowed;
1274 cpumask_t cpus_allowed;
1275
1276#ifdef CONFIG_PREEMPT_RCU
1277 int rcu_read_lock_nesting;
1278 union rcu_special rcu_read_unlock_special;
1279 struct list_head rcu_node_entry;
1280#endif
1281#ifdef CONFIG_TREE_PREEMPT_RCU
1282 struct rcu_node *rcu_blocked_node;
1283#endif
1284#ifdef CONFIG_TASKS_RCU
1285 unsigned long rcu_tasks_nvcsw;
1286 bool rcu_tasks_holdout;
1287 struct list_head rcu_tasks_holdout_list;
1288 int rcu_tasks_idle_cpu;
1289#endif
1290
1291#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1292 struct sched_info sched_info;
1293#endif
1294
1295 struct list_head tasks;
1296#ifdef CONFIG_SMP
1297 struct plist_node pushable_tasks;
1298 struct rb_node pushable_dl_tasks;
1299#endif
1300
1301 struct mm_struct *mm, *active_mm;
1302#ifdef CONFIG_COMPAT_BRK
1303 unsigned brk_randomized:1;
1304#endif
1305
1306 u32 vmacache_seqnum;
1307 struct vm_area_struct *vmacache[VMACACHE_SIZE];
1308#if defined(SPLIT_RSS_COUNTING)
1309 struct task_rss_stat rss_stat;
1310#endif
1311
1312 int exit_state;
1313 int exit_code, exit_signal;
1314 int pdeath_signal;
1315 unsigned int jobctl;
1316
1317
1318 unsigned int personality;
1319
1320 unsigned in_execve:1;
1321
1322 unsigned in_iowait:1;
1323
1324
1325 unsigned sched_reset_on_fork:1;
1326 unsigned sched_contributes_to_load:1;
1327
1328 unsigned long atomic_flags;
1329
1330 pid_t pid;
1331 pid_t tgid;
1332
1333#ifdef CONFIG_CC_STACKPROTECTOR
1334
1335 unsigned long stack_canary;
1336#endif
1337
1338
1339
1340
1341
1342 struct task_struct __rcu *real_parent;
1343 struct task_struct __rcu *parent;
1344
1345
1346
1347 struct list_head children;
1348 struct list_head sibling;
1349 struct task_struct *group_leader;
1350
1351
1352
1353
1354
1355
1356 struct list_head ptraced;
1357 struct list_head ptrace_entry;
1358
1359
1360 struct pid_link pids[PIDTYPE_MAX];
1361 struct list_head thread_group;
1362 struct list_head thread_node;
1363
1364 struct completion *vfork_done;
1365 int __user *set_child_tid;
1366 int __user *clear_child_tid;
1367
1368 cputime_t utime, stime, utimescaled, stimescaled;
1369 cputime_t gtime;
1370#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1371 struct cputime prev_cputime;
1372#endif
1373#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1374 seqlock_t vtime_seqlock;
1375 unsigned long long vtime_snap;
1376 enum {
1377 VTIME_SLEEPING = 0,
1378 VTIME_USER,
1379 VTIME_SYS,
1380 } vtime_snap_whence;
1381#endif
1382 unsigned long nvcsw, nivcsw;
1383 u64 start_time;
1384 u64 real_start_time;
1385
1386 unsigned long min_flt, maj_flt;
1387
1388 struct task_cputime cputime_expires;
1389 struct list_head cpu_timers[3];
1390
1391
1392 const struct cred __rcu *real_cred;
1393
1394 const struct cred __rcu *cred;
1395
1396 char comm[TASK_COMM_LEN];
1397
1398
1399
1400
1401 int link_count, total_link_count;
1402#ifdef CONFIG_SYSVIPC
1403
1404 struct sysv_sem sysvsem;
1405 struct sysv_shm sysvshm;
1406#endif
1407#ifdef CONFIG_DETECT_HUNG_TASK
1408
1409 unsigned long last_switch_count;
1410#endif
1411
1412 struct thread_struct thread;
1413
1414 struct fs_struct *fs;
1415
1416 struct files_struct *files;
1417
1418 struct nsproxy *nsproxy;
1419
1420 struct signal_struct *signal;
1421 struct sighand_struct *sighand;
1422
1423 sigset_t blocked, real_blocked;
1424 sigset_t saved_sigmask;
1425 struct sigpending pending;
1426
1427 unsigned long sas_ss_sp;
1428 size_t sas_ss_size;
1429 int (*notifier)(void *priv);
1430 void *notifier_data;
1431 sigset_t *notifier_mask;
1432 struct callback_head *task_works;
1433
1434 struct audit_context *audit_context;
1435#ifdef CONFIG_AUDITSYSCALL
1436 kuid_t loginuid;
1437 unsigned int sessionid;
1438#endif
1439 struct seccomp seccomp;
1440
1441
1442 u32 parent_exec_id;
1443 u32 self_exec_id;
1444
1445
1446 spinlock_t alloc_lock;
1447
1448
1449 raw_spinlock_t pi_lock;
1450
1451#ifdef CONFIG_RT_MUTEXES
1452
1453 struct rb_root pi_waiters;
1454 struct rb_node *pi_waiters_leftmost;
1455
1456 struct rt_mutex_waiter *pi_blocked_on;
1457#endif
1458
1459#ifdef CONFIG_DEBUG_MUTEXES
1460
1461 struct mutex_waiter *blocked_on;
1462#endif
1463#ifdef CONFIG_TRACE_IRQFLAGS
1464 unsigned int irq_events;
1465 unsigned long hardirq_enable_ip;
1466 unsigned long hardirq_disable_ip;
1467 unsigned int hardirq_enable_event;
1468 unsigned int hardirq_disable_event;
1469 int hardirqs_enabled;
1470 int hardirq_context;
1471 unsigned long softirq_disable_ip;
1472 unsigned long softirq_enable_ip;
1473 unsigned int softirq_disable_event;
1474 unsigned int softirq_enable_event;
1475 int softirqs_enabled;
1476 int softirq_context;
1477#endif
1478#ifdef CONFIG_LOCKDEP
1479# define MAX_LOCK_DEPTH 48UL
1480 u64 curr_chain_key;
1481 int lockdep_depth;
1482 unsigned int lockdep_recursion;
1483 struct held_lock held_locks[MAX_LOCK_DEPTH];
1484 gfp_t lockdep_reclaim_gfp;
1485#endif
1486
1487
1488 void *journal_info;
1489
1490
1491 struct bio_list *bio_list;
1492
1493#ifdef CONFIG_BLOCK
1494
1495 struct blk_plug *plug;
1496#endif
1497
1498
1499 struct reclaim_state *reclaim_state;
1500
1501 struct backing_dev_info *backing_dev_info;
1502
1503 struct io_context *io_context;
1504
1505 unsigned long ptrace_message;
1506 siginfo_t *last_siginfo;
1507 struct task_io_accounting ioac;
1508#if defined(CONFIG_TASK_XACCT)
1509 u64 acct_rss_mem1;
1510 u64 acct_vm_mem1;
1511 cputime_t acct_timexpd;
1512#endif
1513#ifdef CONFIG_CPUSETS
1514 nodemask_t mems_allowed;
1515 seqcount_t mems_allowed_seq;
1516 int cpuset_mem_spread_rotor;
1517 int cpuset_slab_spread_rotor;
1518#endif
1519#ifdef CONFIG_CGROUPS
1520
1521 struct css_set __rcu *cgroups;
1522
1523 struct list_head cg_list;
1524#endif
1525#ifdef CONFIG_FUTEX
1526 struct robust_list_head __user *robust_list;
1527#ifdef CONFIG_COMPAT
1528 struct compat_robust_list_head __user *compat_robust_list;
1529#endif
1530 struct list_head pi_state_list;
1531 struct futex_pi_state *pi_state_cache;
1532#endif
1533#ifdef CONFIG_PERF_EVENTS
1534 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1535 struct mutex perf_event_mutex;
1536 struct list_head perf_event_list;
1537#endif
1538#ifdef CONFIG_DEBUG_PREEMPT
1539 unsigned long preempt_disable_ip;
1540#endif
1541#ifdef CONFIG_NUMA
1542 struct mempolicy *mempolicy;
1543 short il_next;
1544 short pref_node_fork;
1545#endif
1546#ifdef CONFIG_NUMA_BALANCING
1547 int numa_scan_seq;
1548 unsigned int numa_scan_period;
1549 unsigned int numa_scan_period_max;
1550 int numa_preferred_nid;
1551 unsigned long numa_migrate_retry;
1552 u64 node_stamp;
1553 u64 last_task_numa_placement;
1554 u64 last_sum_exec_runtime;
1555 struct callback_head numa_work;
1556
1557 struct list_head numa_entry;
1558 struct numa_group *numa_group;
1559
1560
1561
1562
1563
1564
1565 unsigned long *numa_faults_memory;
1566 unsigned long total_numa_faults;
1567
1568
1569
1570
1571
1572
1573 unsigned long *numa_faults_buffer_memory;
1574
1575
1576
1577
1578
1579 unsigned long *numa_faults_cpu;
1580 unsigned long *numa_faults_buffer_cpu;
1581
1582
1583
1584
1585
1586
1587
1588 unsigned long numa_faults_locality[2];
1589
1590 unsigned long numa_pages_migrated;
1591#endif
1592
1593 struct rcu_head rcu;
1594
1595
1596
1597
1598 struct pipe_inode_info *splice_pipe;
1599
1600 struct page_frag task_frag;
1601
1602#ifdef CONFIG_TASK_DELAY_ACCT
1603 struct task_delay_info *delays;
1604#endif
1605#ifdef CONFIG_FAULT_INJECTION
1606 int make_it_fail;
1607#endif
1608
1609
1610
1611
1612 int nr_dirtied;
1613 int nr_dirtied_pause;
1614 unsigned long dirty_paused_when;
1615
1616#ifdef CONFIG_LATENCYTOP
1617 int latency_record_count;
1618 struct latency_record latency_record[LT_SAVECOUNT];
1619#endif
1620
1621
1622
1623
1624 unsigned long timer_slack_ns;
1625 unsigned long default_timer_slack_ns;
1626
1627#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1628
1629 int curr_ret_stack;
1630
1631 struct ftrace_ret_stack *ret_stack;
1632
1633 unsigned long long ftrace_timestamp;
1634
1635
1636
1637
1638 atomic_t trace_overrun;
1639
1640 atomic_t tracing_graph_pause;
1641#endif
1642#ifdef CONFIG_TRACING
1643
1644 unsigned long trace;
1645
1646 unsigned long trace_recursion;
1647#endif
1648#ifdef CONFIG_MEMCG
1649 unsigned int memcg_kmem_skip_account;
1650 struct memcg_oom_info {
1651 struct mem_cgroup *memcg;
1652 gfp_t gfp_mask;
1653 int order;
1654 unsigned int may_oom:1;
1655 } memcg_oom;
1656#endif
1657#ifdef CONFIG_UPROBES
1658 struct uprobe_task *utask;
1659#endif
1660#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1661 unsigned int sequential_io;
1662 unsigned int sequential_io_avg;
1663#endif
1664};
1665
1666
1667#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1668
1669#define TNF_MIGRATED 0x01
1670#define TNF_NO_GROUP 0x02
1671#define TNF_SHARED 0x04
1672#define TNF_FAULT_LOCAL 0x08
1673
1674#ifdef CONFIG_NUMA_BALANCING
1675extern void task_numa_fault(int last_node, int node, int pages, int flags);
1676extern pid_t task_numa_group_id(struct task_struct *p);
1677extern void set_numabalancing_state(bool enabled);
1678extern void task_numa_free(struct task_struct *p);
1679extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1680 int src_nid, int dst_cpu);
1681#else
1682static inline void task_numa_fault(int last_node, int node, int pages,
1683 int flags)
1684{
1685}
1686static inline pid_t task_numa_group_id(struct task_struct *p)
1687{
1688 return 0;
1689}
1690static inline void set_numabalancing_state(bool enabled)
1691{
1692}
1693static inline void task_numa_free(struct task_struct *p)
1694{
1695}
1696static inline bool should_numa_migrate_memory(struct task_struct *p,
1697 struct page *page, int src_nid, int dst_cpu)
1698{
1699 return true;
1700}
1701#endif
1702
1703static inline struct pid *task_pid(struct task_struct *task)
1704{
1705 return task->pids[PIDTYPE_PID].pid;
1706}
1707
1708static inline struct pid *task_tgid(struct task_struct *task)
1709{
1710 return task->group_leader->pids[PIDTYPE_PID].pid;
1711}
1712
1713
1714
1715
1716
1717
1718static inline struct pid *task_pgrp(struct task_struct *task)
1719{
1720 return task->group_leader->pids[PIDTYPE_PGID].pid;
1721}
1722
1723static inline struct pid *task_session(struct task_struct *task)
1724{
1725 return task->group_leader->pids[PIDTYPE_SID].pid;
1726}
1727
1728struct pid_namespace;
1729
/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
1743pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1744 struct pid_namespace *ns);
1745
1746static inline pid_t task_pid_nr(struct task_struct *tsk)
1747{
1748 return tsk->pid;
1749}
1750
1751static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1752 struct pid_namespace *ns)
1753{
1754 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1755}
1756
1757static inline pid_t task_pid_vnr(struct task_struct *tsk)
1758{
1759 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1760}
1761
1762
1763static inline pid_t task_tgid_nr(struct task_struct *tsk)
1764{
1765 return tsk->tgid;
1766}
1767
1768pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1769
1770static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1771{
1772 return pid_vnr(task_tgid(tsk));
1773}
1774
1775
1776static inline int pid_alive(const struct task_struct *p);
1777static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1778{
1779 pid_t pid = 0;
1780
1781 rcu_read_lock();
1782 if (pid_alive(tsk))
1783 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1784 rcu_read_unlock();
1785
1786 return pid;
1787}
1788
1789static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1790{
1791 return task_ppid_nr_ns(tsk, &init_pid_ns);
1792}
1793
1794static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1795 struct pid_namespace *ns)
1796{
1797 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1798}
1799
1800static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1801{
1802 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1803}
1804
1805
1806static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1807 struct pid_namespace *ns)
1808{
1809 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1810}
1811
1812static inline pid_t task_session_vnr(struct task_struct *tsk)
1813{
1814 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1815}
1816
1817
1818static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1819{
1820 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1821}
1822
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
1833static inline int pid_alive(const struct task_struct *p)
1834{
1835 return p->pids[PIDTYPE_PID].pid != NULL;
1836}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846static inline int is_global_init(struct task_struct *tsk)
1847{
1848 return tsk->pid == 1;
1849}
1850
1851extern struct pid *cad_pid;
1852
1853extern void free_task(struct task_struct *tsk);
1854#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1855
1856extern void __put_task_struct(struct task_struct *t);
1857
1858static inline void put_task_struct(struct task_struct *t)
1859{
1860 if (atomic_dec_and_test(&t->usage))
1861 __put_task_struct(t);
1862}
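/*
 * Illustrative sketch (not from the original header): keeping a task alive
 * across a sleeping operation.  "p" must still be a valid task pointer when
 * the reference is taken (e.g. found under RCU or tasklist_lock).
 *
 *	get_task_struct(p);
 *	...			// p cannot be freed here
 *	put_task_struct(p);	// may free the task_struct
 */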
1863
1864#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1865extern void task_cputime(struct task_struct *t,
1866 cputime_t *utime, cputime_t *stime);
1867extern void task_cputime_scaled(struct task_struct *t,
1868 cputime_t *utimescaled, cputime_t *stimescaled);
1869extern cputime_t task_gtime(struct task_struct *t);
1870#else
1871static inline void task_cputime(struct task_struct *t,
1872 cputime_t *utime, cputime_t *stime)
1873{
1874 if (utime)
1875 *utime = t->utime;
1876 if (stime)
1877 *stime = t->stime;
1878}
1879
1880static inline void task_cputime_scaled(struct task_struct *t,
1881 cputime_t *utimescaled,
1882 cputime_t *stimescaled)
1883{
1884 if (utimescaled)
1885 *utimescaled = t->utimescaled;
1886 if (stimescaled)
1887 *stimescaled = t->stimescaled;
1888}
1889
1890static inline cputime_t task_gtime(struct task_struct *t)
1891{
1892 return t->gtime;
1893}
1894#endif
1895extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1896extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1897
/*
 * Per process flags
 */
1901#define PF_EXITING 0x00000004
1902#define PF_EXITPIDONE 0x00000008
1903#define PF_VCPU 0x00000010
1904#define PF_WQ_WORKER 0x00000020
1905#define PF_FORKNOEXEC 0x00000040
1906#define PF_MCE_PROCESS 0x00000080
1907#define PF_SUPERPRIV 0x00000100
1908#define PF_DUMPCORE 0x00000200
1909#define PF_SIGNALED 0x00000400
1910#define PF_MEMALLOC 0x00000800
1911#define PF_NPROC_EXCEEDED 0x00001000
1912#define PF_USED_MATH 0x00002000
1913#define PF_USED_ASYNC 0x00004000
1914#define PF_NOFREEZE 0x00008000
1915#define PF_FROZEN 0x00010000
1916#define PF_FSTRANS 0x00020000
1917#define PF_KSWAPD 0x00040000
1918#define PF_MEMALLOC_NOIO 0x00080000
1919#define PF_LESS_THROTTLE 0x00100000
1920#define PF_KTHREAD 0x00200000
1921#define PF_RANDOMIZE 0x00400000
1922#define PF_SWAPWRITE 0x00800000
1923#define PF_NO_SETAFFINITY 0x04000000
1924#define PF_MCE_EARLY 0x08000000
1925#define PF_MUTEX_TESTER 0x20000000
1926#define PF_FREEZER_SKIP 0x40000000
1927#define PF_SUSPEND_TASK 0x80000000
1928
/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode, for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
1940#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1941#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1942#define clear_used_math() clear_stopped_child_used_math(current)
1943#define set_used_math() set_stopped_child_used_math(current)
1944#define conditional_stopped_child_used_math(condition, child) \
1945 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1946#define conditional_used_math(condition) \
1947 conditional_stopped_child_used_math(condition, current)
1948#define copy_to_stopped_child_used_math(child) \
1949 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1950
1951#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1952#define used_math() tsk_used_math(current)
1953
1954
1955
1956
1957static inline gfp_t memalloc_noio_flags(gfp_t flags)
1958{
1959 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1960 flags &= ~(__GFP_IO | __GFP_FS);
1961 return flags;
1962}
1963
1964static inline unsigned int memalloc_noio_save(void)
1965{
1966 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
1967 current->flags |= PF_MEMALLOC_NOIO;
1968 return flags;
1969}
1970
1971static inline void memalloc_noio_restore(unsigned int flags)
1972{
1973 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
1974}
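/*
 * Illustrative sketch (not from the original header): bracketing a region
 * whose allocations must not recurse into I/O (e.g. in a block driver's
 * reclaim path).  The exact set of allocations affected by
 * PF_MEMALLOC_NOIO depends on the allocator paths that apply
 * memalloc_noio_flags().
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	page = alloc_page(GFP_KERNEL);	// effectively ~(__GFP_IO|__GFP_FS)
 *	memalloc_noio_restore(noio_flags);
 */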
1975
1976
1977#define PFA_NO_NEW_PRIVS 0
1978#define PFA_SPREAD_PAGE 1
1979#define PFA_SPREAD_SLAB 2
1980
1981
1982#define TASK_PFA_TEST(name, func) \
1983 static inline bool task_##func(struct task_struct *p) \
1984 { return test_bit(PFA_##name, &p->atomic_flags); }
1985#define TASK_PFA_SET(name, func) \
1986 static inline void task_set_##func(struct task_struct *p) \
1987 { set_bit(PFA_##name, &p->atomic_flags); }
1988#define TASK_PFA_CLEAR(name, func) \
1989 static inline void task_clear_##func(struct task_struct *p) \
1990 { clear_bit(PFA_##name, &p->atomic_flags); }
1991
1992TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1993TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1994
1995TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1996TASK_PFA_SET(SPREAD_PAGE, spread_page)
1997TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1998
1999TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2000TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2001TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
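/*
 * Illustrative sketch (not from the original header): the TASK_PFA_*
 * macros above expand into helpers such as
 *
 *	if (task_no_new_privs(current))
 *		...			// test PFA_NO_NEW_PRIVS
 *	task_set_spread_page(p);	// set PFA_SPREAD_PAGE
 *	task_clear_spread_slab(p);	// clear PFA_SPREAD_SLAB
 *
 * Note that no "clear" helper is generated for NO_NEW_PRIVS above, so it
 * can only ever be set.
 */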
2002
2003
2004
2005
2006#define JOBCTL_STOP_SIGMASK 0xffff
2007
2008#define JOBCTL_STOP_DEQUEUED_BIT 16
2009#define JOBCTL_STOP_PENDING_BIT 17
2010#define JOBCTL_STOP_CONSUME_BIT 18
2011#define JOBCTL_TRAP_STOP_BIT 19
2012#define JOBCTL_TRAP_NOTIFY_BIT 20
2013#define JOBCTL_TRAPPING_BIT 21
2014#define JOBCTL_LISTENING_BIT 22
2015
2016#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
2017#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
2018#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
2019#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
2020#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
2021#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
2022#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
2023
2024#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2025#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2026
2027extern bool task_set_jobctl_pending(struct task_struct *task,
2028 unsigned int mask);
2029extern void task_clear_jobctl_trapping(struct task_struct *task);
2030extern void task_clear_jobctl_pending(struct task_struct *task,
2031 unsigned int mask);
2032
2033static inline void rcu_copy_process(struct task_struct *p)
2034{
2035#ifdef CONFIG_PREEMPT_RCU
2036 p->rcu_read_lock_nesting = 0;
2037 p->rcu_read_unlock_special.s = 0;
2038 p->rcu_blocked_node = NULL;
2039 INIT_LIST_HEAD(&p->rcu_node_entry);
2040#endif
2041#ifdef CONFIG_TASKS_RCU
2042 p->rcu_tasks_holdout = false;
2043 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2044 p->rcu_tasks_idle_cpu = -1;
2045#endif
2046}
2047
2048static inline void tsk_restore_flags(struct task_struct *task,
2049 unsigned long orig_flags, unsigned long flags)
2050{
2051 task->flags &= ~flags;
2052 task->flags |= orig_flags & flags;
2053}
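/*
 * Illustrative sketch (not from the original header): temporarily setting a
 * per-process flag and then restoring its previous value with
 * tsk_restore_flags():
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */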
2054
2055#ifdef CONFIG_SMP
2056extern void do_set_cpus_allowed(struct task_struct *p,
2057 const struct cpumask *new_mask);
2058
2059extern int set_cpus_allowed_ptr(struct task_struct *p,
2060 const struct cpumask *new_mask);
2061#else
2062static inline void do_set_cpus_allowed(struct task_struct *p,
2063 const struct cpumask *new_mask)
2064{
2065}
2066static inline int set_cpus_allowed_ptr(struct task_struct *p,
2067 const struct cpumask *new_mask)
2068{
2069 if (!cpumask_test_cpu(0, new_mask))
2070 return -EINVAL;
2071 return 0;
2072}
2073#endif
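/*
 * Illustrative sketch (not from the original header): pinning a task to one
 * CPU; cpumask_of() is from <linux/cpumask.h>.
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)) < 0)
 *		pr_warn("could not pin task to CPU %d\n", cpu);
 */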
2074
2075#ifdef CONFIG_NO_HZ_COMMON
2076void calc_load_enter_idle(void);
2077void calc_load_exit_idle(void);
2078#else
2079static inline void calc_load_enter_idle(void) { }
2080static inline void calc_load_exit_idle(void) { }
2081#endif
2082
2083#ifndef CONFIG_CPUMASK_OFFSTACK
2084static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2085{
2086 return set_cpus_allowed_ptr(p, &new_mask);
2087}
2088#endif
2089
/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, and its use (which you should avoid) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
2098extern unsigned long long notrace sched_clock(void);
2099
2100
2101
2102extern u64 cpu_clock(int cpu);
2103extern u64 local_clock(void);
2104extern u64 sched_clock_cpu(int cpu);
2105
2106
2107extern void sched_clock_init(void);
2108
2109#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2110static inline void sched_clock_tick(void)
2111{
2112}
2113
2114static inline void sched_clock_idle_sleep_event(void)
2115{
2116}
2117
2118static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2119{
2120}
2121#else
2122
2123
2124
2125
2126
2127
2128extern int sched_clock_stable(void);
2129extern void set_sched_clock_stable(void);
2130extern void clear_sched_clock_stable(void);
2131
2132extern void sched_clock_tick(void);
2133extern void sched_clock_idle_sleep_event(void);
2134extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2135#endif
2136
2137#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2138
2139
2140
2141
2142
2143extern void enable_sched_clock_irqtime(void);
2144extern void disable_sched_clock_irqtime(void);
2145#else
2146static inline void enable_sched_clock_irqtime(void) {}
2147static inline void disable_sched_clock_irqtime(void) {}
2148#endif
2149
2150extern unsigned long long
2151task_sched_runtime(struct task_struct *task);
2152
2153
2154#ifdef CONFIG_SMP
2155extern void sched_exec(void);
2156#else
2157#define sched_exec() {}
2158#endif
2159
2160extern void sched_clock_idle_sleep_event(void);
2161extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2162
2163#ifdef CONFIG_HOTPLUG_CPU
2164extern void idle_task_exit(void);
2165#else
2166static inline void idle_task_exit(void) {}
2167#endif
2168
2169#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2170extern void wake_up_nohz_cpu(int cpu);
2171#else
2172static inline void wake_up_nohz_cpu(int cpu) { }
2173#endif
2174
2175#ifdef CONFIG_NO_HZ_FULL
2176extern bool sched_can_stop_tick(void);
2177extern u64 scheduler_tick_max_deferment(void);
2178#else
2179static inline bool sched_can_stop_tick(void) { return false; }
2180#endif
2181
2182#ifdef CONFIG_SCHED_AUTOGROUP
2183extern void sched_autogroup_create_attach(struct task_struct *p);
2184extern void sched_autogroup_detach(struct task_struct *p);
2185extern void sched_autogroup_fork(struct signal_struct *sig);
2186extern void sched_autogroup_exit(struct signal_struct *sig);
2187#ifdef CONFIG_PROC_FS
2188extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2189extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2190#endif
2191#else
2192static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2193static inline void sched_autogroup_detach(struct task_struct *p) { }
2194static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2195static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2196#endif
2197
2198extern int yield_to(struct task_struct *p, bool preempt);
2199extern void set_user_nice(struct task_struct *p, long nice);
2200extern int task_prio(const struct task_struct *p);
2201
2202
2203
2204
2205
2206
2207static inline int task_nice(const struct task_struct *p)
2208{
2209 return PRIO_TO_NICE((p)->static_prio);
2210}
2211extern int can_nice(const struct task_struct *p, const int nice);
2212extern int task_curr(const struct task_struct *p);
2213extern int idle_cpu(int cpu);
2214extern int sched_setscheduler(struct task_struct *, int,
2215 const struct sched_param *);
2216extern int sched_setscheduler_nocheck(struct task_struct *, int,
2217 const struct sched_param *);
2218extern int sched_setattr(struct task_struct *,
2219 const struct sched_attr *);
2220extern struct task_struct *idle_task(int cpu);
2221
2222
2223
2224
2225
2226
2227static inline bool is_idle_task(const struct task_struct *p)
2228{
2229 return p->pid == 0;
2230}
2231extern struct task_struct *curr_task(int cpu);
2232extern void set_curr_task(int cpu, struct task_struct *p);
2233
2234void yield(void);
2235
2236
2237
2238
2239extern struct exec_domain default_exec_domain;
2240
2241union thread_union {
2242 struct thread_info thread_info;
2243 unsigned long stack[THREAD_SIZE/sizeof(long)];
2244};
2245
2246#ifndef __HAVE_ARCH_KSTACK_END
2247static inline int kstack_end(void *addr)
2248{
2249
2250
2251
2252 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2253}
2254#endif
2255
2256extern union thread_union init_thread_union;
2257extern struct task_struct init_task;
2258
2259extern struct mm_struct init_mm;
2260
2261extern struct pid_namespace init_pid_ns;
2262
/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */
2274extern struct task_struct *find_task_by_vpid(pid_t nr);
2275extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2276 struct pid_namespace *ns);
2277
2278
2279extern struct user_struct * alloc_uid(kuid_t);
2280static inline struct user_struct *get_uid(struct user_struct *u)
2281{
2282 atomic_inc(&u->__count);
2283 return u;
2284}
2285extern void free_uid(struct user_struct *);
2286
2287#include <asm/current.h>
2288
2289extern void xtime_update(unsigned long ticks);
2290
2291extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2292extern int wake_up_process(struct task_struct *tsk);
2293extern void wake_up_new_task(struct task_struct *tsk);
2294#ifdef CONFIG_SMP
2295 extern void kick_process(struct task_struct *tsk);
2296#else
2297 static inline void kick_process(struct task_struct *tsk) { }
2298#endif
2299extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2300extern void sched_dead(struct task_struct *p);
2301
2302extern void proc_caches_init(void);
2303extern void flush_signals(struct task_struct *);
2304extern void __flush_signals(struct task_struct *);
2305extern void ignore_signals(struct task_struct *);
2306extern void flush_signal_handlers(struct task_struct *, int force_default);
2307extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2308
2309static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2310{
2311 unsigned long flags;
2312 int ret;
2313
2314 spin_lock_irqsave(&tsk->sighand->siglock, flags);
2315 ret = dequeue_signal(tsk, mask, info);
2316 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2317
2318 return ret;
2319}
2320
2321extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2322 sigset_t *mask);
2323extern void unblock_all_signals(void);
2324extern void release_task(struct task_struct * p);
2325extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2326extern int force_sigsegv(int, struct task_struct *);
2327extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2328extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2329extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2330extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2331 const struct cred *, u32);
2332extern int kill_pgrp(struct pid *pid, int sig, int priv);
2333extern int kill_pid(struct pid *pid, int sig, int priv);
2334extern int kill_proc_info(int, struct siginfo *, pid_t);
2335extern __must_check bool do_notify_parent(struct task_struct *, int);
2336extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2337extern void force_sig(int, struct task_struct *);
2338extern int send_sig(int, struct task_struct *, int);
2339extern int zap_other_threads(struct task_struct *p);
2340extern struct sigqueue *sigqueue_alloc(void);
2341extern void sigqueue_free(struct sigqueue *);
2342extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2343extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2344
2345static inline void restore_saved_sigmask(void)
2346{
2347 if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
2349}
2350
2351static inline sigset_t *sigmask_to_save(void)
2352{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
2356 return res;
2357}
2358
2359static inline int kill_cad_pid(int sig, int priv)
2360{
2361 return kill_pid(cad_pid, sig, priv);
2362}
2363
2364
2365#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2366#define SEND_SIG_PRIV ((struct siginfo *) 1)
2367#define SEND_SIG_FORCED ((struct siginfo *) 2)
2368
2369
2370
2371
2372static inline int on_sig_stack(unsigned long sp)
2373{
2374#ifdef CONFIG_STACK_GROWSUP
2375 return sp >= current->sas_ss_sp &&
2376 sp - current->sas_ss_sp < current->sas_ss_size;
2377#else
2378 return sp > current->sas_ss_sp &&
2379 sp - current->sas_ss_sp <= current->sas_ss_size;
2380#endif
2381}
2382
2383static inline int sas_ss_flags(unsigned long sp)
2384{
2385 if (!current->sas_ss_size)
2386 return SS_DISABLE;
2387
2388 return on_sig_stack(sp) ? SS_ONSTACK : 0;
2389}
2390
2391static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2392{
2393 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2394#ifdef CONFIG_STACK_GROWSUP
2395 return current->sas_ss_sp;
2396#else
2397 return current->sas_ss_sp + current->sas_ss_size;
2398#endif
2399 return sp;
2400}
2401
2402
2403
2404
2405extern struct mm_struct * mm_alloc(void);
2406
2407
2408extern void __mmdrop(struct mm_struct *);
2409static inline void mmdrop(struct mm_struct * mm)
2410{
2411 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2412 __mmdrop(mm);
2413}
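/*
 * Illustrative sketch (not from the original header): mmdrop() pairs with a
 * raw mm_count reference (as taken e.g. by atomic_inc(&mm->mm_count)); it
 * only keeps the struct mm_struct itself alive, not the address space.
 * A "real" user reference that keeps the page tables is taken via
 * atomic_inc(&mm->mm_users) or get_task_mm() and dropped with mmput().
 *
 *	atomic_inc(&mm->mm_count);
 *	...
 *	mmdrop(mm);
 */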
2414
2415
2416extern void mmput(struct mm_struct *);
2417
2418extern struct mm_struct *get_task_mm(struct task_struct *task);
2419
2420
2421
2422
2423
2424extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2425
2426extern void mm_release(struct task_struct *, struct mm_struct *);
2427
2428extern int copy_thread(unsigned long, unsigned long, unsigned long,
2429 struct task_struct *);
2430extern void flush_thread(void);
2431extern void exit_thread(void);
2432
2433extern void exit_files(struct task_struct *);
2434extern void __cleanup_sighand(struct sighand_struct *);
2435
2436extern void exit_itimers(struct signal_struct *);
2437extern void flush_itimer_signals(void);
2438
2439extern void do_group_exit(int);
2440
2441extern int do_execve(struct filename *,
2442 const char __user * const __user *,
2443 const char __user * const __user *);
2444extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2445struct task_struct *fork_idle(int);
2446extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2447
2448extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2449static inline void set_task_comm(struct task_struct *tsk, const char *from)
2450{
2451 __set_task_comm(tsk, from, false);
2452}
2453extern char *get_task_comm(char *to, struct task_struct *tsk);
2454
2455#ifdef CONFIG_SMP
2456void scheduler_ipi(void);
2457extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2458#else
2459static inline void scheduler_ipi(void) { }
2460static inline unsigned long wait_task_inactive(struct task_struct *p,
2461 long match_state)
2462{
2463 return 1;
2464}
2465#endif
2466
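/*
 * Task list iteration.  Callers must hold rcu_read_lock() or
 * tasklist_lock to keep the list stable while walking it.
 */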
2467#define next_task(p) \
2468 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2469
2470#define for_each_process(p) \
2471 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2472
2473extern bool current_is_single_threaded(void);
2474
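/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */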
2479#define do_each_thread(g, t) \
2480 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2481
2482#define while_each_thread(g, t) \
2483 while ((t = next_thread(t)) != g)
2484
2485#define __for_each_thread(signal, t) \
2486 list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2487
2488#define for_each_thread(p, t) \
2489 __for_each_thread((p)->signal, t)
2490
2491
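/*
 * Iterate over every thread of every process.  This is a nested loop, so
 * 'break' only leaves the inner (thread) loop, and the caller must hold
 * rcu_read_lock() or tasklist_lock.  Illustrative sketch:
 *
 *      rcu_read_lock();
 *      for_each_process_thread(p, t)
 *              ... look at t ...
 *      rcu_read_unlock();
 */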
2492#define for_each_process_thread(p, t) \
2493 for_each_process(p) for_each_thread(p, t)
2494
2495static inline int get_nr_threads(struct task_struct *tsk)
2496{
2497 return tsk->signal->nr_threads;
2498}
2499
2500static inline bool thread_group_leader(struct task_struct *p)
2501{
2502 return p->exit_signal >= 0;
2503}
2504
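/*
 * True if @p owns the PID of its thread group leader.  Because de_thread()
 * can hand the leader's PID to the execing thread, this is not always the
 * same thing as thread_group_leader().
 */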
2511static inline bool has_group_leader_pid(struct task_struct *p)
2512{
2513 return task_pid(p) == p->signal->leader_pid;
2514}
2515
2516static inline
2517bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2518{
2519 return p1->signal == p2->signal;
2520}
2521
2522static inline struct task_struct *next_thread(const struct task_struct *p)
2523{
2524 return list_entry_rcu(p->thread_group.next,
2525 struct task_struct, thread_group);
2526}
2527
2528static inline int thread_group_empty(struct task_struct *p)
2529{
2530 return list_empty(&p->thread_group);
2531}
2532
2533#define delay_group_leader(p) \
2534 (thread_group_leader(p) && !thread_group_empty(p))
2535
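/*
 * task_lock()/task_unlock() protect ->fs, ->files, ->mm, ->group_info,
 * ->comm and the cgroup/cpuset pointers.  The lock nests both inside and
 * outside of read_lock(&tasklist_lock), but must not be nested with
 * write_lock_irq(&tasklist_lock).
 */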
2546static inline void task_lock(struct task_struct *p)
2547{
2548 spin_lock(&p->alloc_lock);
2549}
2550
2551static inline void task_unlock(struct task_struct *p)
2552{
2553 spin_unlock(&p->alloc_lock);
2554}
2555
2556extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2557 unsigned long *flags);
2558
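/*
 * lock_task_sighand() returns @tsk's sighand with ->siglock held and
 * interrupts disabled (state saved in *flags), or NULL if the task is
 * already dead.  Illustrative sketch:
 *
 *      unsigned long flags;
 *
 *      if (lock_task_sighand(tsk, &flags)) {
 *              ... tsk->sighand is stable here ...
 *              unlock_task_sighand(tsk, &flags);
 *      }
 */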
2559static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2560 unsigned long *flags)
2561{
2562 struct sighand_struct *ret;
2563
2564 ret = __lock_task_sighand(tsk, flags);
2565 (void)__cond_lock(&tsk->sighand->siglock, ret);
2566 return ret;
2567}
2568
2569static inline void unlock_task_sighand(struct task_struct *tsk,
2570 unsigned long *flags)
2571{
2572 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2573}
2574
2575#ifdef CONFIG_CGROUPS
2576static inline void threadgroup_change_begin(struct task_struct *tsk)
2577{
2578 down_read(&tsk->signal->group_rwsem);
2579}
2580static inline void threadgroup_change_end(struct task_struct *tsk)
2581{
2582 up_read(&tsk->signal->group_rwsem);
2583}
2584
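/*
 * threadgroup_lock() locks the thread group @tsk belongs to: forking into
 * the group and exit/exec of its members are held off until
 * threadgroup_unlock(), which keeps the group stable across blockable
 * operations such as cgroup attach.
 */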
2601static inline void threadgroup_lock(struct task_struct *tsk)
2602{
2603 down_write(&tsk->signal->group_rwsem);
2604}
2605
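/*
 * threadgroup_unlock() - reverse threadgroup_lock().
 */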
2612static inline void threadgroup_unlock(struct task_struct *tsk)
2613{
2614 up_write(&tsk->signal->group_rwsem);
2615}
2616#else
2617static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2618static inline void threadgroup_change_end(struct task_struct *tsk) {}
2619static inline void threadgroup_lock(struct task_struct *tsk) {}
2620static inline void threadgroup_unlock(struct task_struct *tsk) {}
2621#endif
2622
2623#ifndef __HAVE_THREAD_FUNCTIONS
2624
2625#define task_thread_info(task) ((struct thread_info *)(task)->stack)
2626#define task_stack_page(task) ((task)->stack)
2627
2628static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2629{
2630 *task_thread_info(p) = *task_thread_info(org);
2631 task_thread_info(p)->task = p;
2632}
2633
2634
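/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread_info struct;
 * going any lower will corrupt it.  When the stack grows up, this is the
 * highest address before the following page.
 */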
2643static inline unsigned long *end_of_stack(struct task_struct *p)
2644{
2645#ifdef CONFIG_STACK_GROWSUP
2646 return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
2647#else
2648 return (unsigned long *)(task_thread_info(p) + 1);
2649#endif
2650}
2651
2652#endif
2653#define task_stack_end_corrupted(task) \
2654 (*(end_of_stack(task)) != STACK_END_MAGIC)
2655
2656static inline int object_is_on_stack(void *obj)
2657{
2658 void *stack = task_stack_page(current);
2659
2660 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2661}
2662
2663extern void thread_info_cache_init(void);
2664
2665#ifdef CONFIG_DEBUG_STACK_USAGE
2666static inline unsigned long stack_not_used(struct task_struct *p)
2667{
2668 unsigned long *n = end_of_stack(p);
2669
2670 do {
2671 n++;
2672 } while (!*n);
2673
2674 return (unsigned long)n - (unsigned long)end_of_stack(p);
2675}
2676#endif
2677extern void set_task_stack_end_magic(struct task_struct *tsk);
2678
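/*
 * Set, clear and test thread flags in another task's structure; see
 * asm/thread_info.h for the TIF_xxxx flags available.
 */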
2682static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2683{
2684 set_ti_thread_flag(task_thread_info(tsk), flag);
2685}
2686
2687static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2688{
2689 clear_ti_thread_flag(task_thread_info(tsk), flag);
2690}
2691
2692static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2693{
2694 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2695}
2696
2697static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2698{
2699 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2700}
2701
2702static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2703{
2704 return test_ti_thread_flag(task_thread_info(tsk), flag);
2705}
2706
2707static inline void set_tsk_need_resched(struct task_struct *tsk)
2708{
2709 set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
2710}
2711
2712static inline void clear_tsk_need_resched(struct task_struct *tsk)
2713{
2714 clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
2715}
2716
2717static inline int test_tsk_need_resched(struct task_struct *tsk)
2718{
2719 return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
2720}
2721
2722static inline int restart_syscall(void)
2723{
2724 set_tsk_thread_flag(current, TIF_SIGPENDING);
2725 return -ERESTARTNOINTR;
2726}
2727
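/*
 * signal_pending() is what interruptible sleeps check before giving up.
 * Illustrative sketch of the usual pattern ('condition' stands for the
 * caller's wakeup condition):
 *
 *      for (;;) {
 *              set_current_state(TASK_INTERRUPTIBLE);
 *              if (condition || signal_pending(current))
 *                      break;
 *              schedule();
 *      }
 *      __set_current_state(TASK_RUNNING);
 *      if (!condition)
 *              return -ERESTARTSYS;
 */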
2728static inline int signal_pending(struct task_struct *p)
2729{
2730 return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
2731}
2732
2733static inline int __fatal_signal_pending(struct task_struct *p)
2734{
2735 return unlikely(sigismember(&p->pending.signal, SIGKILL));
2736}
2737
2738static inline int fatal_signal_pending(struct task_struct *p)
2739{
2740 return signal_pending(p) && __fatal_signal_pending(p);
2741}
2742
2743static inline int signal_pending_state(long state, struct task_struct *p)
2744{
2745 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2746 return 0;
2747 if (!signal_pending(p))
2748 return 0;
2749
2750 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2751}
2752
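/*
 * cond_resched() and friends: latency reduction via explicit rescheduling
 * in places where it is safe.  The return value indicates whether a
 * reschedule actually happened.  cond_resched_lock() drops the spinlock
 * before scheduling, cond_resched_softirq() re-enables bottom halves.
 * Illustrative sketch for chunked work under a spinlock (my_lock,
 * more_work() and do_a_chunk() are placeholders):
 *
 *      spin_lock(&my_lock);
 *      while (more_work()) {
 *              do_a_chunk();
 *              cond_resched_lock(&my_lock);
 *      }
 *      spin_unlock(&my_lock);
 */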
2760extern int _cond_resched(void);
2761
2762#define cond_resched() ({ \
2763 __might_sleep(__FILE__, __LINE__, 0); \
2764 _cond_resched(); \
2765})
2766
2767extern int __cond_resched_lock(spinlock_t *lock);
2768
2769#ifdef CONFIG_PREEMPT_COUNT
2770#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2771#else
2772#define PREEMPT_LOCK_OFFSET 0
2773#endif
2774
2775#define cond_resched_lock(lock) ({ \
2776 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2777 __cond_resched_lock(lock); \
2778})
2779
2780extern int __cond_resched_softirq(void);
2781
2782#define cond_resched_softirq() ({ \
2783 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2784 __cond_resched_softirq(); \
2785})
2786
2787static inline void cond_resched_rcu(void)
2788{
2789#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2790 rcu_read_unlock();
2791 cond_resched();
2792 rcu_read_lock();
2793#endif
2794}
2795
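/*
 * Does a critical section need to be broken because another task is
 * waiting for the lock?  Technically this does not depend on
 * CONFIG_PREEMPT, but it is only worth acting on when preemption is
 * available.
 */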
2801static inline int spin_needbreak(spinlock_t *lock)
2802{
2803#ifdef CONFIG_PREEMPT
2804 return spin_is_contended(lock);
2805#else
2806 return 0;
2807#endif
2808}
2809
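/*
 * Idle-loop polling helpers: with TIF_POLLING_NRFLAG the idle task polls
 * need_resched() itself, so remote CPUs can skip sending a resched IPI.
 */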
2814#ifdef TIF_POLLING_NRFLAG
2815static inline int tsk_is_polling(struct task_struct *p)
2816{
2817 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2818}
2819
2820static inline void __current_set_polling(void)
2821{
2822 set_thread_flag(TIF_POLLING_NRFLAG);
2823}
2824
2825static inline bool __must_check current_set_polling_and_test(void)
2826{
2827 __current_set_polling();
2828
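 /*
  * Polling state must be visible before we test NEED_RESCHED;
  * paired with resched_curr().
  */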
2833 smp_mb__after_atomic();
2834
2835 return unlikely(tif_need_resched());
2836}
2837
2838static inline void __current_clr_polling(void)
2839{
2840 clear_thread_flag(TIF_POLLING_NRFLAG);
2841}
2842
2843static inline bool __must_check current_clr_polling_and_test(void)
2844{
2845 __current_clr_polling();
2846
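 /*
  * Polling state must be cleared before we test NEED_RESCHED;
  * paired with resched_curr().
  */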
2851 smp_mb__after_atomic();
2852
2853 return unlikely(tif_need_resched());
2854}
2855
2856#else
2857static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2858static inline void __current_set_polling(void) { }
2859static inline void __current_clr_polling(void) { }
2860
2861static inline bool __must_check current_set_polling_and_test(void)
2862{
2863 return unlikely(tif_need_resched());
2864}
2865static inline bool __must_check current_clr_polling_and_test(void)
2866{
2867 return unlikely(tif_need_resched());
2868}
2869#endif
2870
2871static inline void current_clr_polling(void)
2872{
2873 __current_clr_polling();
2874
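 /*
  * Ensure the NEED_RESCHED check below happens after the polling bit
  * has been cleared; paired with resched_curr().
  */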
2881 smp_mb();
2882
2883 preempt_fold_need_resched();
2884}
2885
2886static __always_inline bool need_resched(void)
2887{
2888 return unlikely(tif_need_resched());
2889}
2890
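/*
 * Thread group CPU time accounting.
 */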
2894void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2895void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2896
2897static inline void thread_group_cputime_init(struct signal_struct *sig)
2898{
2899 raw_spin_lock_init(&sig->cputimer.lock);
2900}
2901
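/*
 * Re-evaluate whether the task has signals pending delivery.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */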
2908extern void recalc_sigpending_and_wake(struct task_struct *t);
2909extern void recalc_sigpending(void);
2910
2911extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2912
2913static inline void signal_wake_up(struct task_struct *t, bool resume)
2914{
2915 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2916}
2917static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2918{
2919 signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2920}
2921
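/*
 * Wrappers for task_thread_info(p)->cpu access.  No-op on UP.
 */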
2925#ifdef CONFIG_SMP
2926
2927static inline unsigned int task_cpu(const struct task_struct *p)
2928{
2929 return task_thread_info(p)->cpu;
2930}
2931
2932static inline int task_node(const struct task_struct *p)
2933{
2934 return cpu_to_node(task_cpu(p));
2935}
2936
2937extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2938
2939#else
2940
2941static inline unsigned int task_cpu(const struct task_struct *p)
2942{
2943 return 0;
2944}
2945
2946static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2947{
2948}
2949
2950#endif
2951
2952extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2953extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2954
2955#ifdef CONFIG_CGROUP_SCHED
2956extern struct task_group root_task_group;
2957#endif
2958
2959extern int task_can_switch_user(struct user_struct *up,
2960 struct task_struct *tsk);
2961
2962#ifdef CONFIG_TASK_XACCT
2963static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2964{
2965 tsk->ioac.rchar += amt;
2966}
2967
2968static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2969{
2970 tsk->ioac.wchar += amt;
2971}
2972
2973static inline void inc_syscr(struct task_struct *tsk)
2974{
2975 tsk->ioac.syscr++;
2976}
2977
2978static inline void inc_syscw(struct task_struct *tsk)
2979{
2980 tsk->ioac.syscw++;
2981}
2982#else
2983static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2984{
2985}
2986
2987static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2988{
2989}
2990
2991static inline void inc_syscr(struct task_struct *tsk)
2992{
2993}
2994
2995static inline void inc_syscw(struct task_struct *tsk)
2996{
2997}
2998#endif
2999
3000#ifndef TASK_SIZE_OF
3001#define TASK_SIZE_OF(tsk) TASK_SIZE
3002#endif
3003
3004#ifdef CONFIG_MEMCG
3005extern void mm_update_next_owner(struct mm_struct *mm);
3006#else
3007static inline void mm_update_next_owner(struct mm_struct *mm)
3008{
3009}
3010#endif
3011
3012static inline unsigned long task_rlimit(const struct task_struct *tsk,
3013 unsigned int limit)
3014{
3015 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
3016}
3017
3018static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3019 unsigned int limit)
3020{
3021 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
3022}
3023
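/*
 * rlimit()/rlimit_max() return current's soft/hard limit for @limit.
 * Illustrative use ('size' is a placeholder):
 *
 *      if (size > rlimit(RLIMIT_MEMLOCK))
 *              return -ENOMEM;
 */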
3024static inline unsigned long rlimit(unsigned int limit)
3025{
3026 return task_rlimit(current, limit);
3027}
3028
3029static inline unsigned long rlimit_max(unsigned int limit)
3030{
3031 return task_rlimit_max(current, limit);
3032}
3033
3034#endif
3035