1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5#include <linux/rh_kabi.h>
6
7struct sched_param {
8 int sched_priority;
9};
10
11#include <asm/param.h>
12
13#include <linux/capability.h>
14#include <linux/threads.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/timex.h>
18#include <linux/jiffies.h>
19#include <linux/plist.h>
20#include <linux/rbtree.h>
21#include <linux/thread_info.h>
22#include <linux/cpumask.h>
23#include <linux/errno.h>
24#include <linux/nodemask.h>
25#include <linux/mm_types.h>
26
27#include <asm/page.h>
28#include <asm/ptrace.h>
29#include <linux/cputime.h>
30
31#include <linux/smp.h>
32#include <linux/sem.h>
33#include <linux/signal.h>
34#include <linux/compiler.h>
35#include <linux/completion.h>
36#include <linux/pid.h>
37#include <linux/percpu.h>
38#include <linux/topology.h>
39#include <linux/proportions.h>
40#include <linux/seccomp.h>
41#include <linux/rcupdate.h>
42#include <linux/rculist.h>
43#include <linux/rtmutex.h>
44
45#include <linux/time.h>
46#include <linux/param.h>
47#include <linux/resource.h>
48#include <linux/timer.h>
49#include <linux/hrtimer.h>
50#include <linux/task_io_accounting.h>
51#include <linux/latencytop.h>
52#include <linux/cred.h>
53#include <linux/llist.h>
54#include <linux/uidgid.h>
55#include <linux/gfp.h>
56
57#include <asm/processor.h>
58
59#define SCHED_ATTR_SIZE_VER0 48
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105struct sched_attr {
106 u32 size;
107
108 u32 sched_policy;
109 u64 sched_flags;
110
111
112 s32 sched_nice;
113
114
115 u32 sched_priority;
116
117
118 u64 sched_runtime;
119 u64 sched_deadline;
120 u64 sched_period;
121};
122
123struct exec_domain;
124struct futex_pi_state;
125struct robust_list_head;
126struct bio_list;
127struct fs_struct;
128struct perf_event_context;
129struct blk_plug;
130struct filename;
131
132
133
134
135
136#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
137
138
139
140
141
142
143
144
145
146
147
148extern unsigned long avenrun[];
149extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
150
151#define FSHIFT 11
152#define FIXED_1 (1<<FSHIFT)
153#define LOAD_FREQ (5*HZ+1)
154#define EXP_1 1884
155#define EXP_5 2014
156#define EXP_15 2037
157
158#define CALC_LOAD(load,exp,n) \
159 load *= exp; \
160 load += n*(FIXED_1-exp); \
161 load >>= FSHIFT;
162
163extern unsigned long total_forks;
164extern int nr_threads;
165DECLARE_PER_CPU(unsigned long, process_counts);
166extern int nr_processes(void);
167extern unsigned long nr_running(void);
168extern bool single_task_running(void);
169extern unsigned long nr_iowait(void);
170extern unsigned long nr_iowait_cpu(int cpu);
171extern unsigned long this_cpu_load(void);
172
173
174extern void calc_global_load(unsigned long ticks);
175extern void update_cpu_load_nohz(void);
176
177extern unsigned long get_parent_ip(unsigned long addr);
178
179extern void dump_cpu_task(int cpu);
180
181struct seq_file;
182struct cfs_rq;
183struct task_group;
184#ifdef CONFIG_SCHED_DEBUG
185extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
186extern void proc_sched_set_task(struct task_struct *p);
187extern void
188print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
189#endif
190
191
192
193
194
195
196
197
198
199
200
201#define TASK_RUNNING 0
202#define TASK_INTERRUPTIBLE 1
203#define TASK_UNINTERRUPTIBLE 2
204#define __TASK_STOPPED 4
205#define __TASK_TRACED 8
206
207#define EXIT_ZOMBIE 16
208#define EXIT_DEAD 32
209
210#define TASK_DEAD 64
211#define TASK_WAKEKILL 128
212#define TASK_WAKING 256
213#define TASK_PARKED 512
214#define TASK_STATE_MAX 1024
215
216#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
217
218extern char ___assert_task_state[1 - 2*!!(
219 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
220
221
222#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
223#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
224#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
225
226
227#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
228#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
229
230
231#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
232 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
233 __TASK_TRACED)
234
235#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
236#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
237#define task_is_dead(task) ((task)->exit_state != 0)
238#define task_is_stopped_or_traced(task) \
239 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
240#define task_contributes_to_load(task) \
241 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
242 (task->flags & PF_FROZEN) == 0)
243
244#define __set_task_state(tsk, state_value) \
245 do { (tsk)->state = (state_value); } while (0)
246#define set_task_state(tsk, state_value) \
247 set_mb((tsk)->state, (state_value))
248
249
250
251
252
253
254
255
256
257
258
259
260#define __set_current_state(state_value) \
261 do { current->state = (state_value); } while (0)
262#define set_current_state(state_value) \
263 set_mb(current->state, (state_value))
264
265
266#define TASK_COMM_LEN 16
267
268#include <linux/spinlock.h>
269
270
271
272
273
274
275
276extern qrwlock_t tasklist_lock;
277extern spinlock_t mmlist_lock;
278
279extern void tasklist_write_lock_irq(void);
280extern void tasklist_read_lock(void);
281
282struct task_struct;
283
284#ifdef CONFIG_PROVE_RCU
285extern int lockdep_tasklist_lock_is_held(void);
286#endif
287
288extern void sched_init(void);
289extern void sched_init_smp(void);
290extern asmlinkage void schedule_tail(struct task_struct *prev);
291extern void init_idle(struct task_struct *idle, int cpu);
292extern void init_idle_bootup_task(struct task_struct *idle);
293
294extern cpumask_var_t cpu_isolated_map;
295
296extern int runqueue_is_locked(int cpu);
297
298#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
299extern void nohz_balance_enter_idle(int cpu);
300extern void set_cpu_sd_state_idle(void);
301extern int get_nohz_timer_target(void);
302#else
303static inline void nohz_balance_enter_idle(int cpu) { }
304static inline void set_cpu_sd_state_idle(void) { }
305#endif
306
307
308
309
310extern void show_state_filter(unsigned long state_filter);
311
312static inline void show_state(void)
313{
314 show_state_filter(0);
315}
316
317extern void show_regs(struct pt_regs *);
318
319
320
321
322
323
324extern void show_stack(struct task_struct *task, unsigned long *sp);
325
326void io_schedule(void);
327long io_schedule_timeout(long timeout);
328
329extern void cpu_init (void);
330extern void trap_init(void);
331extern void update_process_times(int user);
332extern void scheduler_tick(void);
333
334extern void sched_show_task(struct task_struct *p);
335
336#ifdef CONFIG_LOCKUP_DETECTOR
337extern void touch_softlockup_watchdog_sched(void);
338extern void touch_softlockup_watchdog(void);
339extern void touch_softlockup_watchdog_sync(void);
340extern void touch_all_softlockup_watchdogs(void);
341extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
342 void __user *buffer,
343 size_t *lenp, loff_t *ppos);
344extern unsigned int softlockup_panic;
345extern unsigned int hardlockup_panic;
346void lockup_detector_init(void);
347#else
348static inline void touch_softlockup_watchdog_sched(void)
349{
350}
351static inline void touch_softlockup_watchdog(void)
352{
353}
354static inline void touch_softlockup_watchdog_sync(void)
355{
356}
357static inline void touch_all_softlockup_watchdogs(void)
358{
359}
360static inline void lockup_detector_init(void)
361{
362}
363#endif
364
365#ifdef CONFIG_DETECT_HUNG_TASK
366void reset_hung_task_detector(void);
367#else
368static inline void reset_hung_task_detector(void)
369{
370}
371#endif
372
373
374#define __sched __attribute__((__section__(".sched.text")))
375
376
377extern char __sched_text_start[], __sched_text_end[];
378
379
380extern int in_sched_functions(unsigned long addr);
381
382#define MAX_SCHEDULE_TIMEOUT LONG_MAX
383extern signed long schedule_timeout(signed long timeout);
384extern signed long schedule_timeout_interruptible(signed long timeout);
385extern signed long schedule_timeout_killable(signed long timeout);
386extern signed long schedule_timeout_uninterruptible(signed long timeout);
387asmlinkage void schedule(void);
388extern void schedule_preempt_disabled(void);
389
390struct nsproxy;
391struct user_namespace;
392
393#ifdef CONFIG_MMU
394extern void arch_pick_mmap_layout(struct mm_struct *mm);
395extern unsigned long
396arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
397 unsigned long, unsigned long);
398extern unsigned long
399arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
400 unsigned long len, unsigned long pgoff,
401 unsigned long flags);
402extern void arch_unmap_area(struct mm_struct *, unsigned long);
403extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
404#else
405static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
406#endif
407
408
409extern void set_dumpable(struct mm_struct *mm, int value);
410extern int get_dumpable(struct mm_struct *mm);
411
412#define SUID_DUMP_DISABLE 0
413#define SUID_DUMP_USER 1
414#define SUID_DUMP_ROOT 2
415
416
417
418#define MMF_DUMPABLE 0
419#define MMF_DUMP_SECURELY 1
420
421#define MMF_DUMPABLE_BITS 2
422#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
423
424
425#define MMF_DUMP_ANON_PRIVATE 2
426#define MMF_DUMP_ANON_SHARED 3
427#define MMF_DUMP_MAPPED_PRIVATE 4
428#define MMF_DUMP_MAPPED_SHARED 5
429#define MMF_DUMP_ELF_HEADERS 6
430#define MMF_DUMP_HUGETLB_PRIVATE 7
431#define MMF_DUMP_HUGETLB_SHARED 8
432#define MMF_DUMP_DAX_PRIVATE 9
433#define MMF_DUMP_DAX_SHARED 10
434
435#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
436#define MMF_DUMP_FILTER_BITS 9
437#define MMF_DUMP_FILTER_MASK \
438 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
439#define MMF_DUMP_FILTER_DEFAULT \
440 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
441 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
442
443#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
444# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
445#else
446# define MMF_DUMP_MASK_DEFAULT_ELF 0
447#endif
448
449#define MMF_VM_MERGEABLE 16
450#define MMF_VM_HUGEPAGE 17
451#define MMF_EXE_FILE_CHANGED 18
452
453#define MMF_HAS_UPROBES 19
454#define MMF_RECALC_UPROBES 20
455
456#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
457
458struct sighand_struct {
459 atomic_t count;
460 struct k_sigaction action[_NSIG];
461 spinlock_t siglock;
462 wait_queue_head_t signalfd_wqh;
463};
464
465struct pacct_struct {
466 int ac_flag;
467 long ac_exitcode;
468 unsigned long ac_mem;
469 cputime_t ac_utime, ac_stime;
470 unsigned long ac_minflt, ac_majflt;
471};
472
473struct cpu_itimer {
474 cputime_t expires;
475 cputime_t incr;
476 u32 error;
477 u32 incr_error;
478};
479
480
481
482
483
484
485
486
487
488
489struct prev_cputime {
490#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
491 cputime_t utime;
492 cputime_t stime;
493 raw_spinlock_t lock;
494#endif
495};
496
497
498struct cputime {
499 cputime_t utime;
500 cputime_t stime;
501};
502
503static inline void prev_cputime_init(struct prev_cputime *prev)
504{
505#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
506 prev->utime = prev->stime = 0;
507 raw_spin_lock_init(&prev->lock);
508#endif
509}
510
511
512
513
514
515
516
517
518
519
520
521struct task_cputime {
522 cputime_t utime;
523 cputime_t stime;
524 unsigned long long sum_exec_runtime;
525};
526
527
528#define virt_exp utime
529#define prof_exp stime
530#define sched_exp sum_exec_runtime
531
532#define INIT_CPUTIME \
533 (struct task_cputime) { \
534 .utime = 0, \
535 .stime = 0, \
536 .sum_exec_runtime = 0, \
537 }
538
539
540
541
542
543
544
545
546#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
547
548
549
550
551
552
553
554
555
556
557
558struct thread_group_cputimer {
559 struct task_cputime cputime;
560 int running;
561 raw_spinlock_t lock;
562};
563
564#include <linux/rwsem.h>
565struct autogroup;
566
567
568
569
570
571
572
573
574struct signal_struct {
575 atomic_t sigcnt;
576 atomic_t live;
577 int nr_threads;
578 struct list_head thread_head;
579
580 wait_queue_head_t wait_chldexit;
581
582
583 struct task_struct *curr_target;
584
585
586 struct sigpending shared_pending;
587
588
589 int group_exit_code;
590
591
592
593
594
595 int notify_count;
596 struct task_struct *group_exit_task;
597
598
599 int group_stop_count;
600 unsigned int flags;
601
602
603
604
605
606
607
608
609
610
611 unsigned int is_child_subreaper:1;
612 unsigned int has_child_subreaper:1;
613
614
615 int posix_timer_id;
616 struct list_head posix_timers;
617
618
619 struct hrtimer real_timer;
620 struct pid *leader_pid;
621 ktime_t it_real_incr;
622
623
624
625
626
627
628 struct cpu_itimer it[2];
629
630
631
632
633
634 struct thread_group_cputimer cputimer;
635
636
637 struct task_cputime cputime_expires;
638
639 struct list_head cpu_timers[3];
640
641 struct pid *tty_old_pgrp;
642
643
644 int leader;
645
646 struct tty_struct *tty;
647
648#ifdef CONFIG_SCHED_AUTOGROUP
649 struct autogroup *autogroup;
650#endif
651
652
653
654
655
656
657 cputime_t utime, stime, cutime, cstime;
658 cputime_t gtime;
659 cputime_t cgtime;
660#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
661 RH_KABI_DEPRECATE(struct cputime, prev_cputime)
662#endif
663 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
664 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
665 unsigned long inblock, oublock, cinblock, coublock;
666 unsigned long maxrss, cmaxrss;
667 struct task_io_accounting ioac;
668
669
670
671
672
673
674
675 unsigned long long sum_sched_runtime;
676
677
678
679
680
681
682
683
684
685
686 struct rlimit rlim[RLIM_NLIMITS];
687
688#ifdef CONFIG_BSD_PROCESS_ACCT
689 struct pacct_struct pacct;
690#endif
691#ifdef CONFIG_TASKSTATS
692 struct taskstats *stats;
693#endif
694#ifdef CONFIG_AUDIT
695 unsigned audit_tty;
696 unsigned audit_tty_log_passwd;
697 struct tty_audit_buf *tty_audit_buf;
698#endif
699#ifdef CONFIG_CGROUPS
700
701
702
703
704
705
706
707
708
709 struct rw_semaphore group_rwsem;
710#endif
711
712 oom_flags_t oom_flags;
713 short oom_score_adj;
714 short oom_score_adj_min;
715
716
717 struct mutex cred_guard_mutex;
718
719
720
721
722 RH_KABI_USE(1, seqlock_t stats_lock)
723 RH_KABI_RESERVE(2)
724 RH_KABI_RESERVE(3)
725 RH_KABI_RESERVE(4)
726#ifndef __GENKSYMS__
727 struct prev_cputime prev_cputime;
728#endif
729};
730
731
732
733
734#define SIGNAL_STOP_STOPPED 0x00000001
735#define SIGNAL_STOP_CONTINUED 0x00000002
736#define SIGNAL_GROUP_EXIT 0x00000004
737#define SIGNAL_GROUP_COREDUMP 0x00000008
738
739
740
741#define SIGNAL_CLD_STOPPED 0x00000010
742#define SIGNAL_CLD_CONTINUED 0x00000020
743#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
744
745#define SIGNAL_UNKILLABLE 0x00000040
746
747
748static inline int signal_group_exit(const struct signal_struct *sig)
749{
750 return (sig->flags & SIGNAL_GROUP_EXIT) ||
751 (sig->group_exit_task != NULL);
752}
753
754
755
756
757struct user_struct {
758 atomic_t __count;
759 atomic_t processes;
760 atomic_t files;
761 atomic_t sigpending;
762#ifdef CONFIG_INOTIFY_USER
763 atomic_t inotify_watches;
764 atomic_t inotify_devs;
765#endif
766#ifdef CONFIG_FANOTIFY
767 atomic_t fanotify_listeners;
768#endif
769#ifdef CONFIG_EPOLL
770 atomic_long_t epoll_watches;
771#endif
772#ifdef CONFIG_POSIX_MQUEUE
773
774 unsigned long mq_bytes;
775#endif
776 unsigned long locked_shm;
777
778#ifdef CONFIG_KEYS
779 struct key *uid_keyring;
780 struct key *session_keyring;
781#endif
782
783
784 struct hlist_node uidhash_node;
785 kuid_t uid;
786
787#ifdef CONFIG_PERF_EVENTS
788 atomic_long_t locked_vm;
789#endif
790 RH_KABI_EXTEND(unsigned long unix_inflight)
791 RH_KABI_EXTEND(atomic_long_t pipe_bufs)
792};
793
794extern int uids_sysfs_init(void);
795
796extern struct user_struct *find_user(kuid_t);
797
798extern struct user_struct root_user;
799#define INIT_USER (&root_user)
800
801
802struct backing_dev_info;
803struct reclaim_state;
804
805#ifdef CONFIG_SCHED_INFO
806struct sched_info {
807
808 unsigned long pcount;
809 unsigned long long run_delay;
810
811
812 unsigned long long last_arrival,
813 last_queued;
814};
815#endif
816
817#ifdef CONFIG_TASK_DELAY_ACCT
818struct task_delay_info {
819 spinlock_t lock;
820 unsigned int flags;
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837 struct timespec blkio_start, blkio_end;
838 u64 blkio_delay;
839 u64 swapin_delay;
840 u32 blkio_count;
841
842 u32 swapin_count;
843
844
845 struct timespec freepages_start, freepages_end;
846 u64 freepages_delay;
847 u32 freepages_count;
848};
849#endif
850
851static inline int sched_info_on(void)
852{
853#ifdef CONFIG_SCHEDSTATS
854 return 1;
855#elif defined(CONFIG_TASK_DELAY_ACCT)
856 extern int delayacct_on;
857 return delayacct_on;
858#else
859 return 0;
860#endif
861}
862
863#ifdef CONFIG_SCHEDSTATS
864void force_schedstat_enabled(void);
865#endif
866
867enum cpu_idle_type {
868 CPU_IDLE,
869 CPU_NOT_IDLE,
870 CPU_NEWLY_IDLE,
871 CPU_MAX_IDLE_TYPES
872};
873
874
875
876
877#define SCHED_POWER_SHIFT 10
878#define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT)
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906struct wake_q_node {
907 struct wake_q_node *next;
908};
909
910struct wake_q_head {
911 struct wake_q_node *first;
912 struct wake_q_node **lastp;
913};
914
915#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
916
917#define WAKE_Q(name) \
918 struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
919
920extern void wake_q_add(struct wake_q_head *head,
921 struct task_struct *task);
922extern void wake_up_q(struct wake_q_head *head);
923
924
925
926
927#ifdef CONFIG_SMP
928#define SD_LOAD_BALANCE 0x0001
929#define SD_BALANCE_NEWIDLE 0x0002
930#define SD_BALANCE_EXEC 0x0004
931#define SD_BALANCE_FORK 0x0008
932#define SD_BALANCE_WAKE 0x0010
933#define SD_WAKE_AFFINE 0x0020
934#define SD_SHARE_CPUPOWER 0x0080
935#define SD_SHARE_PKG_RESOURCES 0x0200
936#define SD_SERIALIZE 0x0400
937#define SD_ASYM_PACKING 0x0800
938#define SD_PREFER_SIBLING 0x1000
939#define SD_OVERLAP 0x2000
940#define SD_NUMA 0x4000
941
942extern int __weak arch_sd_sibiling_asym_packing(void);
943
944#ifdef CONFIG_SCHED_SMT
945static inline int cpu_smt_flags(void)
946{
947 return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
948}
949#endif
950
951#ifdef CONFIG_SCHED_MC
952static inline int cpu_core_flags(void)
953{
954 return SD_SHARE_PKG_RESOURCES;
955}
956#endif
957
958#ifdef CONFIG_NUMA
959static inline int cpu_numa_flags(void)
960{
961 return SD_NUMA;
962}
963#endif
964
965extern int arch_asym_cpu_priority(int cpu);
966
967struct sched_domain_attr {
968 int relax_domain_level;
969};
970
971#define SD_ATTR_INIT (struct sched_domain_attr) { \
972 .relax_domain_level = -1, \
973}
974
975extern int sched_domain_level_max;
976
977struct sched_group;
978
979struct sched_domain {
980
981 struct sched_domain *parent;
982 struct sched_domain *child;
983 struct sched_group *groups;
984 unsigned long min_interval;
985 unsigned long max_interval;
986 unsigned int busy_factor;
987 unsigned int imbalance_pct;
988 unsigned int cache_nice_tries;
989 unsigned int busy_idx;
990 unsigned int idle_idx;
991 unsigned int newidle_idx;
992 unsigned int wake_idx;
993 unsigned int forkexec_idx;
994 unsigned int smt_gain;
995
996 int nohz_idle;
997 int flags;
998 int level;
999
1000
1001 unsigned long last_balance;
1002 unsigned int balance_interval;
1003 unsigned int nr_balance_failed;
1004
1005 u64 last_update;
1006
1007
1008 u64 max_newidle_lb_cost;
1009 unsigned long next_decay_max_lb_cost;
1010
1011#ifdef CONFIG_SCHED_DEBUG
1012 char *name;
1013#endif
1014 union {
1015 void *private;
1016 struct rcu_head rcu;
1017 };
1018
1019 unsigned int span_weight;
1020
1021#ifndef __GENKSYMS__
1022
1023
1024 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
1025 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
1026 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
1027 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1028 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1029 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1030 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1031 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
1032
1033
1034 unsigned int alb_count;
1035 unsigned int alb_failed;
1036 unsigned int alb_pushed;
1037
1038
1039 unsigned int sbe_count;
1040 unsigned int sbe_balanced;
1041 unsigned int sbe_pushed;
1042
1043
1044 unsigned int sbf_count;
1045 unsigned int sbf_balanced;
1046 unsigned int sbf_pushed;
1047
1048
1049 unsigned int ttwu_wake_remote;
1050 unsigned int ttwu_move_affine;
1051 unsigned int ttwu_move_balance;
1052#endif
1053
1054
1055
1056
1057
1058
1059
1060
1061 unsigned long span[0];
1062};
1063
1064static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1065{
1066 return to_cpumask(sd->span);
1067}
1068
1069extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1070 struct sched_domain_attr *dattr_new);
1071
1072
1073cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1074void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1075
1076bool cpus_share_cache(int this_cpu, int that_cpu);
1077
1078typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1079typedef int (*sched_domain_flags_f)(void);
1080
1081#define SDTL_OVERLAP 0x01
1082
1083struct sd_data {
1084 struct sched_domain **__percpu sd;
1085 struct sched_group **__percpu sg;
1086 struct sched_group_power **__percpu sgp;
1087};
1088
1089struct sched_domain_topology_level {
1090 sched_domain_mask_f mask;
1091 sched_domain_flags_f sd_flags;
1092 int flags;
1093 int numa_level;
1094 struct sd_data data;
1095#ifdef CONFIG_SCHED_DEBUG
1096 char *name;
1097#endif
1098};
1099
1100extern struct sched_domain_topology_level *sched_domain_topology;
1101
1102extern void set_sched_topology(struct sched_domain_topology_level *tl);
1103
1104#ifdef CONFIG_SCHED_DEBUG
1105# define SD_INIT_NAME(type) .name = #type
1106#else
1107# define SD_INIT_NAME(type)
1108#endif
1109
1110#else
1111
1112struct sched_domain_attr;
1113
1114static inline void
1115partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1116 struct sched_domain_attr *dattr_new)
1117{
1118}
1119
1120static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1121{
1122 return true;
1123}
1124
1125#endif
1126
1127
1128struct io_context;
1129
1130
1131#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1132extern void prefetch_stack(struct task_struct *t);
1133#else
1134static inline void prefetch_stack(struct task_struct *t) { }
1135#endif
1136
1137struct audit_context;
1138struct mempolicy;
1139struct pipe_inode_info;
1140struct uts_namespace;
1141
1142struct load_weight {
1143
1144 unsigned long weight, inv_weight;
1145};
1146
1147struct sched_avg {
1148
1149
1150
1151
1152
1153 u32 runnable_avg_sum, runnable_avg_period;
1154 u64 last_runnable_update;
1155 s64 decay_count;
1156 unsigned long load_avg_contrib;
1157};
1158
1159#ifdef CONFIG_SCHEDSTATS
1160struct sched_statistics {
1161 u64 wait_start;
1162 u64 wait_max;
1163 u64 wait_count;
1164 u64 wait_sum;
1165 u64 iowait_count;
1166 u64 iowait_sum;
1167
1168 u64 sleep_start;
1169 u64 sleep_max;
1170 s64 sum_sleep_runtime;
1171
1172 u64 block_start;
1173 u64 block_max;
1174 u64 exec_max;
1175 u64 slice_max;
1176
1177 u64 nr_migrations_cold;
1178 u64 nr_failed_migrations_affine;
1179 u64 nr_failed_migrations_running;
1180 u64 nr_failed_migrations_hot;
1181 u64 nr_forced_migrations;
1182
1183 u64 nr_wakeups;
1184 u64 nr_wakeups_sync;
1185 u64 nr_wakeups_migrate;
1186 u64 nr_wakeups_local;
1187 u64 nr_wakeups_remote;
1188 u64 nr_wakeups_affine;
1189 u64 nr_wakeups_affine_attempts;
1190 u64 nr_wakeups_passive;
1191 u64 nr_wakeups_idle;
1192};
1193#endif
1194
1195struct sched_entity {
1196 struct load_weight load;
1197 struct rb_node run_node;
1198 struct list_head group_node;
1199 unsigned int on_rq;
1200
1201 u64 exec_start;
1202 u64 sum_exec_runtime;
1203 u64 vruntime;
1204 u64 prev_sum_exec_runtime;
1205
1206 u64 nr_migrations;
1207
1208#ifdef CONFIG_FAIR_GROUP_SCHED
1209 struct sched_entity *parent;
1210
1211 struct cfs_rq *cfs_rq;
1212
1213 struct cfs_rq *my_q;
1214#endif
1215
1216
1217
1218
1219
1220
1221#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
1222
1223 struct sched_avg avg;
1224#endif
1225
1226 RH_KABI_USE(1, struct sched_statistics *statistics)
1227
1228
1229 RH_KABI_RESERVE(2)
1230 RH_KABI_RESERVE(3)
1231 RH_KABI_RESERVE(4)
1232};
1233
1234struct sched_rt_entity {
1235 struct list_head run_list;
1236 unsigned long timeout;
1237 unsigned long watchdog_stamp;
1238 unsigned int time_slice;
1239
1240 struct sched_rt_entity *back;
1241#ifdef CONFIG_RT_GROUP_SCHED
1242 struct sched_rt_entity *parent;
1243
1244 struct rt_rq *rt_rq;
1245
1246 struct rt_rq *my_q;
1247#endif
1248};
1249
1250struct sched_dl_entity {
1251 struct rb_node rb_node;
1252
1253
1254
1255
1256
1257
1258 u64 dl_runtime;
1259 u64 dl_deadline;
1260 u64 dl_period;
1261 u64 dl_bw;
1262 u64 dl_density;
1263
1264
1265
1266
1267
1268
1269 s64 runtime;
1270 u64 deadline;
1271 unsigned int flags;
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294 int dl_throttled, dl_boosted, dl_yielded, dl_non_contending;
1295
1296
1297
1298
1299
1300 struct hrtimer dl_timer;
1301
1302
1303
1304
1305
1306
1307
1308
1309 struct hrtimer inactive_timer;
1310};
1311
1312struct rcu_node;
1313
1314enum perf_event_task_context {
1315 perf_invalid_context = -1,
1316 perf_hw_context = 0,
1317 perf_sw_context,
1318 perf_nr_task_contexts,
1319};
1320
1321
1322struct tlbflush_unmap_batch {
1323
1324
1325
1326
1327 struct cpumask cpumask;
1328
1329
1330 bool flush_required;
1331
1332
1333
1334
1335
1336
1337 bool writable;
1338};
1339
1340struct task_struct {
1341 volatile long state;
1342 void *stack;
1343 atomic_t usage;
1344 unsigned int flags;
1345 unsigned int ptrace;
1346
1347#ifdef CONFIG_SMP
1348 struct llist_node wake_entry;
1349 int on_cpu;
1350 struct task_struct *last_wakee;
1351 unsigned long wakee_flips;
1352 unsigned long wakee_flip_decay_ts;
1353
1354 int wake_cpu;
1355#endif
1356 int on_rq;
1357
1358 int prio, static_prio, normal_prio;
1359 unsigned int rt_priority;
1360 const struct sched_class *sched_class;
1361 struct sched_entity se;
1362 struct sched_rt_entity rt;
1363#ifdef CONFIG_CGROUP_SCHED
1364 struct task_group *sched_task_group;
1365#endif
1366#ifdef CONFIG_PREEMPT_NOTIFIERS
1367
1368 struct hlist_head preempt_notifiers;
1369#elif defined(CONFIG_S390)
1370 RH_KABI_DEPRECATE(struct hlist_head, preempt_notifiers)
1371#endif
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381 unsigned char fpu_counter;
1382#ifdef CONFIG_BLK_DEV_IO_TRACE
1383 unsigned int btrace_seq;
1384#endif
1385
1386 unsigned int policy;
1387 int nr_cpus_allowed;
1388 cpumask_t cpus_allowed;
1389
1390#ifdef CONFIG_PREEMPT_RCU
1391 int rcu_read_lock_nesting;
1392 char rcu_read_unlock_special;
1393 struct list_head rcu_node_entry;
1394#endif
1395#ifdef CONFIG_TREE_PREEMPT_RCU
1396 struct rcu_node *rcu_blocked_node;
1397#endif
1398#ifdef CONFIG_RCU_BOOST
1399 struct rt_mutex *rcu_boost_mutex;
1400#endif
1401
1402#ifdef CONFIG_SCHED_INFO
1403 struct sched_info sched_info;
1404#endif
1405
1406 struct list_head tasks;
1407#ifdef CONFIG_SMP
1408 struct plist_node pushable_tasks;
1409#endif
1410
1411 struct mm_struct *mm, *active_mm;
1412#ifdef CONFIG_COMPAT_BRK
1413 unsigned brk_randomized:1;
1414#endif
1415#if defined(SPLIT_RSS_COUNTING)
1416 struct task_rss_stat rss_stat;
1417#endif
1418
1419 int exit_state;
1420 int exit_code, exit_signal;
1421 int pdeath_signal;
1422 unsigned int jobctl;
1423
1424
1425 unsigned int personality;
1426
1427 unsigned did_exec:1;
1428 unsigned in_execve:1;
1429
1430 unsigned in_iowait:1;
1431
1432
1433 unsigned no_new_privs:1;
1434
1435
1436 unsigned sched_reset_on_fork:1;
1437 unsigned sched_contributes_to_load:1;
1438 RH_KABI_FILL_HOLE(unsigned sched_remote_wakeup:1)
1439
1440 pid_t pid;
1441 pid_t tgid;
1442
1443#ifdef CONFIG_CC_STACKPROTECTOR
1444
1445 unsigned long stack_canary;
1446#endif
1447
1448
1449
1450
1451
1452 struct task_struct __rcu *real_parent;
1453 struct task_struct __rcu *parent;
1454
1455
1456
1457 struct list_head children;
1458 struct list_head sibling;
1459 struct task_struct *group_leader;
1460
1461
1462
1463
1464
1465
1466 struct list_head ptraced;
1467 struct list_head ptrace_entry;
1468
1469
1470 struct pid_link pids[PIDTYPE_MAX];
1471 struct list_head thread_group;
1472 struct list_head thread_node;
1473
1474 struct completion *vfork_done;
1475 int __user *set_child_tid;
1476 int __user *clear_child_tid;
1477
1478 cputime_t utime, stime, utimescaled, stimescaled;
1479 cputime_t gtime;
1480#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1481 RH_KABI_DEPRECATE(struct cputime, prev_cputime)
1482#endif
1483#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1484 seqlock_t vtime_seqlock;
1485 unsigned long long vtime_snap;
1486 enum {
1487 VTIME_SLEEPING = 0,
1488 VTIME_USER,
1489 VTIME_SYS,
1490 } vtime_snap_whence;
1491#endif
1492 unsigned long nvcsw, nivcsw;
1493 struct timespec start_time;
1494 struct timespec real_start_time;
1495
1496 unsigned long min_flt, maj_flt;
1497
1498 struct task_cputime cputime_expires;
1499 struct list_head cpu_timers[3];
1500
1501
1502 const struct cred __rcu *real_cred;
1503
1504 const struct cred __rcu *cred;
1505
1506 char comm[TASK_COMM_LEN];
1507
1508
1509
1510
1511 int link_count, total_link_count;
1512#ifdef CONFIG_SYSVIPC
1513
1514 struct sysv_sem sysvsem;
1515#endif
1516
1517 struct thread_struct thread;
1518
1519 struct fs_struct *fs;
1520
1521 struct files_struct *files;
1522
1523 struct nsproxy *nsproxy;
1524
1525 struct signal_struct *signal;
1526 struct sighand_struct *sighand;
1527
1528 sigset_t blocked, real_blocked;
1529 sigset_t saved_sigmask;
1530 struct sigpending pending;
1531
1532 unsigned long sas_ss_sp;
1533 size_t sas_ss_size;
1534 int (*notifier)(void *priv);
1535 void *notifier_data;
1536 sigset_t *notifier_mask;
1537 struct callback_head *task_works;
1538
1539 struct audit_context *audit_context;
1540#ifdef CONFIG_AUDITSYSCALL
1541 kuid_t loginuid;
1542 unsigned int sessionid;
1543#endif
1544 struct seccomp seccomp;
1545
1546
1547 u32 parent_exec_id;
1548 u32 self_exec_id;
1549
1550
1551 spinlock_t alloc_lock;
1552
1553
1554 raw_spinlock_t pi_lock;
1555
1556#ifdef CONFIG_RT_MUTEXES
1557
1558#ifndef __GENKSYMS__
1559 struct rb_root pi_waiters;
1560 struct rb_node *pi_waiters_leftmost;
1561#else
1562 struct plist_head pi_waiters;
1563#endif
1564
1565 struct rt_mutex_waiter *pi_blocked_on;
1566#endif
1567
1568#ifdef CONFIG_DEBUG_MUTEXES
1569
1570 struct mutex_waiter *blocked_on;
1571#endif
1572#ifdef CONFIG_TRACE_IRQFLAGS
1573 unsigned int irq_events;
1574 unsigned long hardirq_enable_ip;
1575 unsigned long hardirq_disable_ip;
1576 unsigned int hardirq_enable_event;
1577 unsigned int hardirq_disable_event;
1578 int hardirqs_enabled;
1579 int hardirq_context;
1580 unsigned long softirq_disable_ip;
1581 unsigned long softirq_enable_ip;
1582 unsigned int softirq_disable_event;
1583 unsigned int softirq_enable_event;
1584 int softirqs_enabled;
1585 int softirq_context;
1586#endif
1587#ifdef CONFIG_LOCKDEP
1588# define MAX_LOCK_DEPTH 48UL
1589 u64 curr_chain_key;
1590 int lockdep_depth;
1591 unsigned int lockdep_recursion;
1592 struct held_lock held_locks[MAX_LOCK_DEPTH];
1593 gfp_t lockdep_reclaim_gfp;
1594#endif
1595
1596
1597 void *journal_info;
1598
1599
1600 struct bio_list *bio_list;
1601
1602#ifdef CONFIG_BLOCK
1603
1604 struct blk_plug *plug;
1605#endif
1606
1607
1608 struct reclaim_state *reclaim_state;
1609
1610 struct backing_dev_info *backing_dev_info;
1611
1612 struct io_context *io_context;
1613
1614 unsigned long ptrace_message;
1615 siginfo_t *last_siginfo;
1616 struct task_io_accounting ioac;
1617#if defined(CONFIG_TASK_XACCT)
1618 u64 acct_rss_mem1;
1619 u64 acct_vm_mem1;
1620 cputime_t acct_timexpd;
1621#endif
1622#ifdef CONFIG_CPUSETS
1623 nodemask_t mems_allowed;
1624 seqcount_t mems_allowed_seq;
1625 int cpuset_mem_spread_rotor;
1626 int cpuset_slab_spread_rotor;
1627#endif
1628#ifdef CONFIG_CGROUPS
1629
1630 struct css_set __rcu *cgroups;
1631
1632 struct list_head cg_list;
1633#endif
1634#ifdef CONFIG_FUTEX
1635 struct robust_list_head __user *robust_list;
1636#ifdef CONFIG_COMPAT
1637 struct compat_robust_list_head __user *compat_robust_list;
1638#endif
1639 struct list_head pi_state_list;
1640 struct futex_pi_state *pi_state_cache;
1641#endif
1642#ifdef CONFIG_PERF_EVENTS
1643 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1644 struct mutex perf_event_mutex;
1645 struct list_head perf_event_list;
1646#endif
1647#ifdef CONFIG_NUMA
1648 struct mempolicy *mempolicy;
1649 short il_next;
1650 short pref_node_fork;
1651#endif
1652#ifdef CONFIG_NUMA_BALANCING
1653 int numa_scan_seq;
1654 unsigned int numa_scan_period;
1655 unsigned int numa_scan_period_max;
1656 int numa_preferred_nid;
1657 unsigned long numa_migrate_retry;
1658 u64 node_stamp;
1659 u64 last_task_numa_placement;
1660 u64 last_sum_exec_runtime;
1661 struct callback_head numa_work;
1662
1663 struct list_head numa_entry;
1664 struct numa_group *numa_group;
1665
1666
1667
1668
1669
1670
1671 unsigned long *numa_faults_memory;
1672 unsigned long total_numa_faults;
1673
1674
1675
1676
1677
1678
1679 unsigned long *numa_faults_buffer_memory;
1680
1681
1682
1683
1684
1685 unsigned long *numa_faults_cpu;
1686 unsigned long *numa_faults_buffer_cpu;
1687
1688
1689
1690
1691
1692
1693
1694 unsigned long numa_faults_locality[2];
1695
1696 unsigned long numa_pages_migrated;
1697#endif
1698
1699 struct rcu_head rcu;
1700
1701
1702
1703
1704 struct pipe_inode_info *splice_pipe;
1705
1706 struct page_frag task_frag;
1707
1708#ifdef CONFIG_TASK_DELAY_ACCT
1709 struct task_delay_info *delays;
1710#endif
1711#ifdef CONFIG_FAULT_INJECTION
1712 int make_it_fail;
1713#endif
1714
1715
1716
1717
1718 int nr_dirtied;
1719 int nr_dirtied_pause;
1720 unsigned long dirty_paused_when;
1721
1722#ifdef CONFIG_LATENCYTOP
1723 int latency_record_count;
1724 struct latency_record latency_record[LT_SAVECOUNT];
1725#endif
1726
1727
1728
1729
1730 unsigned long timer_slack_ns;
1731 unsigned long default_timer_slack_ns;
1732
1733#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_X86_64)
1734
1735 int curr_ret_stack;
1736
1737 struct ftrace_ret_stack *ret_stack;
1738
1739 unsigned long long ftrace_timestamp;
1740
1741
1742
1743
1744 atomic_t trace_overrun;
1745
1746 atomic_t tracing_graph_pause;
1747#endif
1748#ifdef CONFIG_TRACING
1749
1750 unsigned long trace;
1751
1752 unsigned long trace_recursion;
1753#endif
1754#ifdef CONFIG_MEMCG
1755 struct memcg_batch_info {
1756 int do_batch;
1757 struct mem_cgroup *memcg;
1758 unsigned long nr_pages;
1759 unsigned long memsw_nr_pages;
1760 } memcg_batch;
1761 unsigned int memcg_kmem_skip_account;
1762#endif
1763#ifdef CONFIG_HAVE_HW_BREAKPOINT
1764 atomic_t ptrace_bp_refcnt;
1765#endif
1766#if !defined(CONFIG_S390) && defined(CONFIG_UPROBES)
1767 struct uprobe_task *utask;
1768#endif
1769#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1770 unsigned int sequential_io;
1771 unsigned int sequential_io_avg;
1772#endif
1773
1774
1775#ifdef CONFIG_DETECT_HUNG_TASK
1776 RH_KABI_USE(1, unsigned long last_switch_count)
1777#else
1778 RH_KABI_RESERVE(1)
1779#endif
1780 RH_KABI_USE(2, unsigned long atomic_flags)
1781#if defined(CONFIG_S390) && defined(CONFIG_UPROBES)
1782 RH_KABI_USE(3, struct uprobe_task *utask)
1783#else
1784 RH_KABI_RESERVE(3)
1785#endif
1786
1787 RH_KABI_USE(4, int mm_shmempages)
1788#ifdef CONFIG_INTEL_RDT
1789 RH_KABI_USE(5, u32 closid)
1790#else
1791 RH_KABI_RESERVE(5)
1792#endif
1793#ifdef CONFIG_LIVEPATCH
1794 RH_KABI_USE(6, int patch_state)
1795#else
1796 RH_KABI_RESERVE(6)
1797#endif
1798#ifdef CONFIG_INTEL_RDT
1799 RH_KABI_USE(7, u32 rmid)
1800#else
1801 RH_KABI_RESERVE(7)
1802#endif
1803 RH_KABI_RESERVE(8)
1804#ifndef __GENKSYMS__
1805#ifdef CONFIG_MEMCG
1806 struct memcg_oom_info {
1807 struct mem_cgroup *memcg;
1808 gfp_t gfp_mask;
1809 int order;
1810 unsigned int may_oom:1;
1811 } memcg_oom;
1812#endif
1813#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1814 struct tlbflush_unmap_batch tlb_ubc;
1815#endif
1816#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && !defined(CONFIG_X86_64)
1817
1818 int curr_ret_stack;
1819
1820 struct ftrace_ret_stack *ret_stack;
1821
1822 unsigned long long ftrace_timestamp;
1823
1824
1825
1826
1827 atomic_t trace_overrun;
1828
1829 atomic_t tracing_graph_pause;
1830#endif
1831 struct sched_dl_entity dl;
1832 #ifdef CONFIG_SMP
1833 struct rb_node pushable_dl_tasks;
1834 #endif
1835 struct sched_statistics statistics;
1836 struct wake_q_node wake_q;
1837 struct prev_cputime prev_cputime;
1838#endif
1839};
1840
1841
1842#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1843
1844#define TNF_MIGRATED 0x01
1845#define TNF_NO_GROUP 0x02
1846#define TNF_SHARED 0x04
1847#define TNF_FAULT_LOCAL 0x08
1848
1849#ifdef CONFIG_NUMA_BALANCING
1850extern void task_numa_fault(int last_node, int node, int pages, int flags);
1851extern pid_t task_numa_group_id(struct task_struct *p);
1852extern void set_numabalancing_state(bool enabled);
1853extern void task_numa_free(struct task_struct *p);
1854extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1855 int src_nid, int dst_cpu);
1856#else
1857static inline void task_numa_fault(int last_node, int node, int pages,
1858 int flags)
1859{
1860}
1861static inline pid_t task_numa_group_id(struct task_struct *p)
1862{
1863 return 0;
1864}
1865static inline void set_numabalancing_state(bool enabled)
1866{
1867}
1868static inline void task_numa_free(struct task_struct *p)
1869{
1870}
1871static inline bool should_numa_migrate_memory(struct task_struct *p,
1872 struct page *page, int src_nid, int dst_cpu)
1873{
1874 return true;
1875}
1876#endif
1877
1878static inline struct pid *task_pid(struct task_struct *task)
1879{
1880 return task->pids[PIDTYPE_PID].pid;
1881}
1882
1883static inline struct pid *task_tgid(struct task_struct *task)
1884{
1885 return task->group_leader->pids[PIDTYPE_PID].pid;
1886}
1887
1888
1889
1890
1891
1892
1893static inline struct pid *task_pgrp(struct task_struct *task)
1894{
1895 return task->group_leader->pids[PIDTYPE_PGID].pid;
1896}
1897
1898static inline struct pid *task_session(struct task_struct *task)
1899{
1900 return task->group_leader->pids[PIDTYPE_SID].pid;
1901}
1902
1903struct pid_namespace;
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1919 struct pid_namespace *ns);
1920
1921static inline pid_t task_pid_nr(struct task_struct *tsk)
1922{
1923 return tsk->pid;
1924}
1925
1926static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1927 struct pid_namespace *ns)
1928{
1929 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1930}
1931
1932static inline pid_t task_pid_vnr(struct task_struct *tsk)
1933{
1934 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1935}
1936
1937
1938static inline pid_t task_tgid_nr(struct task_struct *tsk)
1939{
1940 return tsk->tgid;
1941}
1942
1943pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1944
1945static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1946{
1947 return pid_vnr(task_tgid(tsk));
1948}
1949
1950
1951static inline int pid_alive(const struct task_struct *p);
1952static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1953{
1954 pid_t pid = 0;
1955
1956 rcu_read_lock();
1957 if (pid_alive(tsk))
1958 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1959 rcu_read_unlock();
1960
1961 return pid;
1962}
1963
1964static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1965{
1966 return task_ppid_nr_ns(tsk, &init_pid_ns);
1967}
1968
1969static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1970 struct pid_namespace *ns)
1971{
1972 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1973}
1974
1975static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1976{
1977 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1978}
1979
1980
1981static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1982 struct pid_namespace *ns)
1983{
1984 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1985}
1986
1987static inline pid_t task_session_vnr(struct task_struct *tsk)
1988{
1989 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1990}
1991
1992
1993static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1994{
1995 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1996}
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008static inline int pid_alive(const struct task_struct *p)
2009{
2010 return p->pids[PIDTYPE_PID].pid != NULL;
2011}
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021static inline int is_global_init(struct task_struct *tsk)
2022{
2023 return tsk->pid == 1;
2024}
2025
2026extern struct pid *cad_pid;
2027
2028extern void free_task(struct task_struct *tsk);
2029#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
2030
2031extern void __put_task_struct(struct task_struct *t);
2032
2033static inline void put_task_struct(struct task_struct *t)
2034{
2035 if (atomic_dec_and_test(&t->usage))
2036 __put_task_struct(t);
2037}
2038
2039struct task_struct *task_rcu_dereference(struct task_struct **ptask);
2040struct task_struct *try_get_task_struct(struct task_struct **ptask);
2041
2042#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2043extern void task_cputime(struct task_struct *t,
2044 cputime_t *utime, cputime_t *stime);
2045extern void task_cputime_scaled(struct task_struct *t,
2046 cputime_t *utimescaled, cputime_t *stimescaled);
2047extern cputime_t task_gtime(struct task_struct *t);
2048#else
2049static inline void task_cputime(struct task_struct *t,
2050 cputime_t *utime, cputime_t *stime)
2051{
2052 if (utime)
2053 *utime = t->utime;
2054 if (stime)
2055 *stime = t->stime;
2056}
2057
2058static inline void task_cputime_scaled(struct task_struct *t,
2059 cputime_t *utimescaled,
2060 cputime_t *stimescaled)
2061{
2062 if (utimescaled)
2063 *utimescaled = t->utimescaled;
2064 if (stimescaled)
2065 *stimescaled = t->stimescaled;
2066}
2067
2068static inline cputime_t task_gtime(struct task_struct *t)
2069{
2070 return t->gtime;
2071}
2072#endif
2073extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2074extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2075
2076
2077
2078
2079#define PF_EXITING 0x00000004
2080#define PF_EXITPIDONE 0x00000008
2081#define PF_VCPU 0x00000010
2082#define PF_WQ_WORKER 0x00000020
2083#define PF_FORKNOEXEC 0x00000040
2084#define PF_MCE_PROCESS 0x00000080
2085#define PF_SUPERPRIV 0x00000100
2086#define PF_DUMPCORE 0x00000200
2087#define PF_SIGNALED 0x00000400
2088#define PF_MEMALLOC 0x00000800
2089#define PF_NPROC_EXCEEDED 0x00001000
2090#define PF_USED_MATH 0x00002000
2091#define PF_USED_ASYNC 0x00004000
2092#define PF_NOFREEZE 0x00008000
2093#define PF_FROZEN 0x00010000
2094#define PF_FSTRANS 0x00020000
2095#define PF_KSWAPD 0x00040000
2096#define PF_MEMALLOC_NOIO 0x00080000
2097#define PF_LESS_THROTTLE 0x00100000
2098#define PF_KTHREAD 0x00200000
2099#define PF_RANDOMIZE 0x00400000
2100#define PF_SWAPWRITE 0x00800000
2101#define PF_NO_SETAFFINITY 0x04000000
2102#define PF_MCE_EARLY 0x08000000
2103#define PF_MEMPOLICY 0x10000000
2104#define PF_MUTEX_TESTER 0x20000000
2105#define PF_FREEZER_SKIP 0x40000000
2106#define PF_SUSPEND_TASK 0x80000000
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2120#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2121#define clear_used_math() clear_stopped_child_used_math(current)
2122#define set_used_math() set_stopped_child_used_math(current)
2123#define conditional_stopped_child_used_math(condition, child) \
2124 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2125#define conditional_used_math(condition) \
2126 conditional_stopped_child_used_math(condition, current)
2127#define copy_to_stopped_child_used_math(child) \
2128 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2129
2130#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2131#define used_math() tsk_used_math(current)
2132
2133
2134#define PFA_NO_NEW_PRIVS 0
2135#define PFA_SPREAD_PAGE 1
2136#define PFA_SPREAD_SLAB 2
2137
2138#define PFA_SPEC_SSB_DISABLE 3
2139#define PFA_SPEC_SSB_FORCE_DISABLE 4
2140
2141#define TASK_PFA_TEST(name, func) \
2142 static inline bool task_##func(struct task_struct *p) \
2143 { return test_bit(PFA_##name, &p->atomic_flags); }
2144#define TASK_PFA_SET(name, func) \
2145 static inline void task_set_##func(struct task_struct *p) \
2146 { set_bit(PFA_##name, &p->atomic_flags); }
2147#define TASK_PFA_CLEAR(name, func) \
2148 static inline void task_clear_##func(struct task_struct *p) \
2149 { clear_bit(PFA_##name, &p->atomic_flags); }
2150
2151TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2152TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2153
2154TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2155TASK_PFA_SET(SPREAD_PAGE, spread_page)
2156TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2157
2158TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2159TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2160TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2161
2162TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
2163TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
2164TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
2165
2166TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
2167TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
2168
2169
2170static inline gfp_t memalloc_noio_flags(gfp_t flags)
2171{
2172 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2173 flags &= ~__GFP_IO;
2174 return flags;
2175}
2176
2177static inline unsigned int memalloc_noio_save(void)
2178{
2179 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2180 current->flags |= PF_MEMALLOC_NOIO;
2181 return flags;
2182}
2183
2184static inline void memalloc_noio_restore(unsigned int flags)
2185{
2186 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2187}
2188
2189
2190
2191
2192#define JOBCTL_STOP_SIGMASK 0xffff
2193
2194#define JOBCTL_STOP_DEQUEUED_BIT 16
2195#define JOBCTL_STOP_PENDING_BIT 17
2196#define JOBCTL_STOP_CONSUME_BIT 18
2197#define JOBCTL_TRAP_STOP_BIT 19
2198#define JOBCTL_TRAP_NOTIFY_BIT 20
2199#define JOBCTL_TRAPPING_BIT 21
2200#define JOBCTL_LISTENING_BIT 22
2201
2202#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
2203#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
2204#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
2205#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
2206#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
2207#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
2208#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
2209
2210#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2211#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2212
2213extern bool task_set_jobctl_pending(struct task_struct *task,
2214 unsigned int mask);
2215extern void task_clear_jobctl_trapping(struct task_struct *task);
2216extern void task_clear_jobctl_pending(struct task_struct *task,
2217 unsigned int mask);
2218
2219#ifdef CONFIG_PREEMPT_RCU
2220
2221#define RCU_READ_UNLOCK_BLOCKED (1 << 0)
2222#define RCU_READ_UNLOCK_NEED_QS (1 << 1)
2223
2224static inline void rcu_copy_process(struct task_struct *p)
2225{
2226 p->rcu_read_lock_nesting = 0;
2227 p->rcu_read_unlock_special = 0;
2228#ifdef CONFIG_TREE_PREEMPT_RCU
2229 p->rcu_blocked_node = NULL;
2230#endif
2231#ifdef CONFIG_RCU_BOOST
2232 p->rcu_boost_mutex = NULL;
2233#endif
2234 INIT_LIST_HEAD(&p->rcu_node_entry);
2235}
2236
2237#else
2238
2239static inline void rcu_copy_process(struct task_struct *p)
2240{
2241}
2242
2243#endif
2244
2245static inline void tsk_restore_flags(struct task_struct *task,
2246 unsigned long orig_flags, unsigned long flags)
2247{
2248 task->flags &= ~flags;
2249 task->flags |= orig_flags & flags;
2250}
2251
2252extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2253 const struct cpumask *trial);
2254extern int task_can_attach(struct task_struct *p,
2255 const struct cpumask *cs_cpus_allowed);
2256#ifdef CONFIG_SMP
2257extern void do_set_cpus_allowed(struct task_struct *p,
2258 const struct cpumask *new_mask);
2259
2260extern int set_cpus_allowed_ptr(struct task_struct *p,
2261 const struct cpumask *new_mask);
2262#else
2263static inline void do_set_cpus_allowed(struct task_struct *p,
2264 const struct cpumask *new_mask)
2265{
2266}
2267static inline int set_cpus_allowed_ptr(struct task_struct *p,
2268 const struct cpumask *new_mask)
2269{
2270 if (!cpumask_test_cpu(0, new_mask))
2271 return -EINVAL;
2272 return 0;
2273}
2274#endif
2275
2276#ifdef CONFIG_NO_HZ_COMMON
2277void calc_load_enter_idle(void);
2278void calc_load_exit_idle(void);
2279#else
2280static inline void calc_load_enter_idle(void) { }
2281static inline void calc_load_exit_idle(void) { }
2282#endif
2283
2284#ifndef CONFIG_CPUMASK_OFFSTACK
2285static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2286{
2287 return set_cpus_allowed_ptr(p, &new_mask);
2288}
2289#endif
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299extern unsigned long long notrace sched_clock(void);
2300
2301
2302
2303extern u64 cpu_clock(int cpu);
2304extern u64 local_clock(void);
2305extern u64 running_clock(void);
2306extern u64 sched_clock_cpu(int cpu);
2307
2308
2309extern void sched_clock_init(void);
2310
2311#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2312static inline void sched_clock_tick(void)
2313{
2314}
2315
2316static inline void sched_clock_idle_sleep_event(void)
2317{
2318}
2319
2320static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2321{
2322}
2323#else
2324
2325
2326
2327
2328
2329
2330extern int sched_clock_stable(void);
2331extern void set_sched_clock_stable(void);
2332extern void clear_sched_clock_stable(void);
2333
2334extern void sched_clock_tick(void);
2335extern void sched_clock_idle_sleep_event(void);
2336extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2337#endif
2338
2339#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2340
2341
2342
2343
2344
2345extern void enable_sched_clock_irqtime(void);
2346extern void disable_sched_clock_irqtime(void);
2347#else
2348static inline void enable_sched_clock_irqtime(void) {}
2349static inline void disable_sched_clock_irqtime(void) {}
2350#endif
2351
2352extern unsigned long long
2353task_sched_runtime(struct task_struct *task);
2354
2355
2356#ifdef CONFIG_SMP
2357extern void sched_exec(void);
2358#else
2359#define sched_exec() {}
2360#endif
2361
2362extern void sched_clock_idle_sleep_event(void);
2363extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2364
2365#ifdef CONFIG_HOTPLUG_CPU
2366extern void idle_task_exit(void);
2367#else
2368static inline void idle_task_exit(void) {}
2369#endif
2370
2371#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2372extern void wake_up_nohz_cpu(int cpu);
2373#else
2374static inline void wake_up_nohz_cpu(int cpu) { }
2375#endif
2376
2377#ifdef CONFIG_NO_HZ_FULL
2378extern bool sched_can_stop_tick(void);
2379extern u64 scheduler_tick_max_deferment(void);
2380#else
2381static inline bool sched_can_stop_tick(void) { return false; }
2382#endif
2383
2384#ifdef CONFIG_SCHED_AUTOGROUP
2385extern void sched_autogroup_create_attach(struct task_struct *p);
2386extern void sched_autogroup_detach(struct task_struct *p);
2387extern void sched_autogroup_fork(struct signal_struct *sig);
2388extern void sched_autogroup_exit(struct signal_struct *sig);
2389extern void sched_autogroup_exit_task(struct task_struct *p);
2390#ifdef CONFIG_PROC_FS
2391extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2392extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2393#endif
2394#else
2395static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2396static inline void sched_autogroup_detach(struct task_struct *p) { }
2397static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2398static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2399static inline void sched_autogroup_exit_task(struct task_struct *p) { }
2400#endif
2401
2402extern int yield_to(struct task_struct *p, bool preempt);
2403extern void set_user_nice(struct task_struct *p, long nice);
2404extern int task_prio(const struct task_struct *p);
2405extern int task_nice(const struct task_struct *p);
2406extern int can_nice(const struct task_struct *p, const int nice);
2407extern int task_curr(const struct task_struct *p);
2408extern int idle_cpu(int cpu);
2409extern int sched_setscheduler(struct task_struct *, int,
2410 const struct sched_param *);
2411extern int sched_setscheduler_nocheck(struct task_struct *, int,
2412 const struct sched_param *);
2413extern int sched_setattr(struct task_struct *,
2414 const struct sched_attr *);
2415extern struct task_struct *idle_task(int cpu);
2416
2417
2418
2419
2420
2421
2422static inline bool is_idle_task(const struct task_struct *p)
2423{
2424 return p->pid == 0;
2425}
2426extern struct task_struct *curr_task(int cpu);
2427extern void set_curr_task(int cpu, struct task_struct *p);
2428
2429void yield(void);
2430
2431
2432
2433
2434extern struct exec_domain default_exec_domain;
2435
2436union thread_union {
2437 struct thread_info thread_info;
2438 unsigned long stack[THREAD_SIZE/sizeof(long)];
2439};
2440
2441#ifndef __HAVE_ARCH_KSTACK_END
2442static inline int kstack_end(void *addr)
2443{
2444
2445
2446
2447 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2448}
2449#endif
2450
2451extern union thread_union init_thread_union;
2452extern struct task_struct init_task;
2453
2454extern struct mm_struct init_mm;
2455
2456extern struct pid_namespace init_pid_ns;
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469extern struct task_struct *find_task_by_vpid(pid_t nr);
2470extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2471 struct pid_namespace *ns);
2472
2473extern void __set_special_pids(struct pid *pid);
2474
2475
2476extern struct user_struct * alloc_uid(kuid_t);
2477static inline struct user_struct *get_uid(struct user_struct *u)
2478{
2479 atomic_inc(&u->__count);
2480 return u;
2481}
2482extern void free_uid(struct user_struct *);
2483
2484#include <asm/current.h>
2485
2486extern void xtime_update(unsigned long ticks);
2487
2488extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2489extern int wake_up_process(struct task_struct *tsk);
2490extern void wake_up_new_task(struct task_struct *tsk);
2491#ifdef CONFIG_SMP
2492 extern void kick_process(struct task_struct *tsk);
2493#else
2494 static inline void kick_process(struct task_struct *tsk) { }
2495#endif
2496extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2497extern void sched_dead(struct task_struct *p);
2498
2499extern void proc_caches_init(void);
2500extern void flush_signals(struct task_struct *);
2501extern void __flush_signals(struct task_struct *);
2502extern void ignore_signals(struct task_struct *);
2503extern void flush_signal_handlers(struct task_struct *, int force_default);
2504extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2505
2506static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2507{
2508 unsigned long flags;
2509 int ret;
2510
2511 spin_lock_irqsave(&tsk->sighand->siglock, flags);
2512 ret = dequeue_signal(tsk, mask, info);
2513 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2514
2515 return ret;
2516}
2517
2518extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2519 sigset_t *mask);
2520extern void unblock_all_signals(void);
2521extern void release_task(struct task_struct * p);
2522extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2523extern int force_sigsegv(int, struct task_struct *);
2524extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2525extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2526extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2527extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2528 const struct cred *, u32);
2529extern int kill_pgrp(struct pid *pid, int sig, int priv);
2530extern int kill_pid(struct pid *pid, int sig, int priv);
2531extern int kill_proc_info(int, struct siginfo *, pid_t);
2532extern __must_check bool do_notify_parent(struct task_struct *, int);
2533extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2534extern void force_sig(int, struct task_struct *);
2535extern int send_sig(int, struct task_struct *, int);
2536extern int zap_other_threads(struct task_struct *p);
2537extern struct sigqueue *sigqueue_alloc(void);
2538extern void sigqueue_free(struct sigqueue *);
2539extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2540extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2541
2542static inline void restore_saved_sigmask(void)
2543{
2544 if (test_and_clear_restore_sigmask())
2545 __set_current_blocked(¤t->saved_sigmask);
2546}
2547
2548static inline sigset_t *sigmask_to_save(void)
2549{
2550 sigset_t *res = ¤t->blocked;
2551 if (unlikely(test_restore_sigmask()))
2552 res = ¤t->saved_sigmask;
2553 return res;
2554}
2555
2556static inline int kill_cad_pid(int sig, int priv)
2557{
2558 return kill_pid(cad_pid, sig, priv);
2559}
2560
2561
2562#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2563#define SEND_SIG_PRIV ((struct siginfo *) 1)
2564#define SEND_SIG_FORCED ((struct siginfo *) 2)
2565
2566
2567
2568
2569static inline int on_sig_stack(unsigned long sp)
2570{
2571#ifdef CONFIG_STACK_GROWSUP
2572 return sp >= current->sas_ss_sp &&
2573 sp - current->sas_ss_sp < current->sas_ss_size;
2574#else
2575 return sp > current->sas_ss_sp &&
2576 sp - current->sas_ss_sp <= current->sas_ss_size;
2577#endif
2578}
2579
2580static inline int sas_ss_flags(unsigned long sp)
2581{
2582 return (current->sas_ss_size == 0 ? SS_DISABLE
2583 : on_sig_stack(sp) ? SS_ONSTACK : 0);
2584}
2585
2586static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2587{
2588 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2589#ifdef CONFIG_STACK_GROWSUP
2590 return current->sas_ss_sp;
2591#else
2592 return current->sas_ss_sp + current->sas_ss_size;
2593#endif
2594 return sp;
2595}
2596
2597
2598
2599
2600extern struct mm_struct * mm_alloc(void);
2601
2602
2603extern void __mmdrop(struct mm_struct *);
2604static inline void mmdrop(struct mm_struct *mm)
2605{
2606
2607
2608
2609
2610
2611 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2612 __mmdrop(mm);
2613}
2614
2615static inline bool mmget_not_zero(struct mm_struct *mm)
2616{
2617 return atomic_inc_not_zero(&mm->mm_users);
2618}
2619
2620
2621extern void mmput(struct mm_struct *);
2622
2623extern struct mm_struct *get_task_mm(struct task_struct *task);
2624
2625
2626
2627
2628
2629extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2630
2631extern void mm_release(struct task_struct *, struct mm_struct *);
2632
2633extern struct mm_struct *dup_mm(struct task_struct *tsk);
2634
2635extern int copy_thread(unsigned long, unsigned long, unsigned long,
2636 struct task_struct *);
2637extern void flush_thread(void);
2638extern void exit_thread(void);
2639
2640extern void exit_files(struct task_struct *);
2641extern void __cleanup_sighand(struct sighand_struct *);
2642
2643extern void exit_itimers(struct signal_struct *);
2644extern void flush_itimer_signals(void);
2645
2646extern void do_group_exit(int);
2647
2648extern int allow_signal(int);
2649extern int disallow_signal(int);
2650
2651extern int do_execve(struct filename *,
2652 const char __user * const __user *,
2653 const char __user * const __user *);
2654extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2655struct task_struct *fork_idle(int);
2656extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2657
2658extern void __set_task_comm(struct task_struct *tsk, char *from, bool exec);
2659static inline void set_task_comm(struct task_struct *tsk, char *from)
2660{
2661 __set_task_comm(tsk, from, false);
2662}
2663extern char *get_task_comm(char *to, struct task_struct *tsk);
2664
2665#ifdef CONFIG_SMP
2666void scheduler_ipi(void);
2667extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2668#else
2669static inline void scheduler_ipi(void) { }
2670static inline unsigned long wait_task_inactive(struct task_struct *p,
2671 long match_state)
2672{
2673 return 1;
2674}
2675#endif
2676
2677#define next_task(p) \
2678 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2679
2680#define for_each_process(p) \
2681 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2682
2683extern bool current_is_single_threaded(void);
2684
2685
2686
2687
2688
2689#define do_each_thread(g, t) \
2690 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2691
2692#define while_each_thread(g, t) \
2693 while ((t = next_thread(t)) != g)
2694
2695#define __for_each_thread(signal, t) \
2696 list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2697
2698#define for_each_thread(p, t) \
2699 __for_each_thread((p)->signal, t)
2700
2701
2702#define for_each_process_thread(p, t) \
2703 for_each_process(p) for_each_thread(p, t)
2704
2705static inline int get_nr_threads(struct task_struct *tsk)
2706{
2707 return tsk->signal->nr_threads;
2708}
2709
2710static inline bool thread_group_leader(struct task_struct *p)
2711{
2712 return p->exit_signal >= 0;
2713}
2714
2715
2716
2717
2718
2719
2720
2721static inline int has_group_leader_pid(struct task_struct *p)
2722{
2723 return p->pid == p->tgid;
2724}
2725
2726static inline
2727int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2728{
2729 return p1->tgid == p2->tgid;
2730}
2731
2732static inline struct task_struct *next_thread(const struct task_struct *p)
2733{
2734 return list_entry_rcu(p->thread_group.next,
2735 struct task_struct, thread_group);
2736}
2737
2738
2739
2740
2741
2742
2743
2744
2745static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
2746{
2747 return NULL;
2748}
2749
2750static inline int thread_group_empty(struct task_struct *p)
2751{
2752 return list_empty(&p->thread_group);
2753}
2754
2755#define delay_group_leader(p) \
2756 (thread_group_leader(p) && !thread_group_empty(p))
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768static inline void task_lock(struct task_struct *p)
2769{
2770 spin_lock(&p->alloc_lock);
2771}
2772
2773static inline void task_unlock(struct task_struct *p)
2774{
2775 spin_unlock(&p->alloc_lock);
2776}
2777
2778extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2779 unsigned long *flags);
2780
2781static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2782 unsigned long *flags)
2783{
2784 struct sighand_struct *ret;
2785
2786 ret = __lock_task_sighand(tsk, flags);
2787 (void)__cond_lock(&tsk->sighand->siglock, ret);
2788 return ret;
2789}
2790
2791static inline void unlock_task_sighand(struct task_struct *tsk,
2792 unsigned long *flags)
2793{
2794 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2795}
2796
2797#ifdef CONFIG_CGROUPS
2798static inline void threadgroup_change_begin(struct task_struct *tsk)
2799{
2800 down_read(&tsk->signal->group_rwsem);
2801}
2802static inline void threadgroup_change_end(struct task_struct *tsk)
2803{
2804 up_read(&tsk->signal->group_rwsem);
2805}
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823static inline void threadgroup_lock(struct task_struct *tsk)
2824{
2825 down_write(&tsk->signal->group_rwsem);
2826}
2827
2828
2829
2830
2831
2832
2833
2834static inline void threadgroup_unlock(struct task_struct *tsk)
2835{
2836 up_write(&tsk->signal->group_rwsem);
2837}
2838#else
2839static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2840static inline void threadgroup_change_end(struct task_struct *tsk) {}
2841static inline void threadgroup_lock(struct task_struct *tsk) {}
2842static inline void threadgroup_unlock(struct task_struct *tsk) {}
2843#endif
2844
2845#ifndef __HAVE_THREAD_FUNCTIONS
2846
2847#define task_thread_info(task) ((struct thread_info *)(task)->stack)
2848#define task_stack_page(task) ((task)->stack)
2849
2850static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2851{
2852 *task_thread_info(p) = *task_thread_info(org);
2853 task_thread_info(p)->task = p;
2854}
2855
2856static inline unsigned long *end_of_stack(struct task_struct *p)
2857{
2858 return (unsigned long *)(task_thread_info(p) + 1);
2859}
2860
2861#endif
2862
2863static inline int object_is_on_stack(void *obj)
2864{
2865 void *stack = task_stack_page(current);
2866
2867 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2868}
2869
2870extern void thread_info_cache_init(void);
2871
2872#ifdef CONFIG_DEBUG_STACK_USAGE
2873static inline unsigned long stack_not_used(struct task_struct *p)
2874{
2875 unsigned long *n = end_of_stack(p);
2876
2877 do {
2878 n++;
2879 } while (!*n);
2880
2881 return (unsigned long)n - (unsigned long)end_of_stack(p);
2882}
2883#endif
2884
2885
2886
2887
2888static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2889{
2890 set_ti_thread_flag(task_thread_info(tsk), flag);
2891}
2892
2893static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2894{
2895 clear_ti_thread_flag(task_thread_info(tsk), flag);
2896}
2897
2898static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2899{
2900 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2901}
2902
2903static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2904{
2905 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2906}
2907
2908static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2909{
2910 return test_ti_thread_flag(task_thread_info(tsk), flag);
2911}
2912
2913static inline void set_tsk_need_resched(struct task_struct *tsk)
2914{
2915 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2916}
2917
2918static inline void clear_tsk_need_resched(struct task_struct *tsk)
2919{
2920 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2921}
2922
2923static inline int test_tsk_need_resched(struct task_struct *tsk)
2924{
2925 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2926}
2927
2928static inline int restart_syscall(void)
2929{
2930 set_tsk_thread_flag(current, TIF_SIGPENDING);
2931 return -ERESTARTNOINTR;
2932}
2933
2934static inline int signal_pending(struct task_struct *p)
2935{
2936 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2937}
2938
2939static inline int __fatal_signal_pending(struct task_struct *p)
2940{
2941 return unlikely(sigismember(&p->pending.signal, SIGKILL));
2942}
2943
2944static inline int fatal_signal_pending(struct task_struct *p)
2945{
2946 return signal_pending(p) && __fatal_signal_pending(p);
2947}
2948
2949static inline int signal_pending_state(long state, struct task_struct *p)
2950{
2951 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2952 return 0;
2953 if (!signal_pending(p))
2954 return 0;
2955
2956 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2957}
2958
2959static __always_inline int need_resched(void)
2960{
2961 return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2962}
2963
2964
2965
2966
2967
2968
2969
2970
2971extern int _cond_resched(void);
2972
2973#define cond_resched() ({ \
2974 __might_sleep(__FILE__, __LINE__, 0); \
2975 _cond_resched(); \
2976})
2977
2978extern int __cond_resched_lock(spinlock_t *lock);
2979
2980#ifdef CONFIG_PREEMPT_COUNT
2981#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2982#else
2983#define PREEMPT_LOCK_OFFSET 0
2984#endif
2985
2986#define cond_resched_lock(lock) ({ \
2987 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2988 __cond_resched_lock(lock); \
2989})
2990
2991extern int __cond_resched_softirq(void);
2992
2993#define cond_resched_softirq() ({ \
2994 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2995 __cond_resched_softirq(); \
2996})
2997
2998static inline void cond_resched_rcu(void)
2999{
3000#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
3001 rcu_read_unlock();
3002 cond_resched();
3003 rcu_read_lock();
3004#endif
3005}
3006
3007
3008
3009
3010
3011
3012static inline int spin_needbreak(spinlock_t *lock)
3013{
3014#ifdef CONFIG_PREEMPT
3015 return spin_is_contended(lock);
3016#else
3017 return 0;
3018#endif
3019}
3020
3021
3022
3023
3024
3025
3026
3027#ifdef TS_POLLING
3028static inline int tsk_is_polling(struct task_struct *p)
3029{
3030 return task_thread_info(p)->status & TS_POLLING;
3031}
3032static inline void __current_set_polling(void)
3033{
3034 current_thread_info()->status |= TS_POLLING;
3035}
3036
3037static inline bool __must_check current_set_polling_and_test(void)
3038{
3039 __current_set_polling();
3040
3041
3042
3043
3044
3045 smp_mb();
3046
3047 return unlikely(tif_need_resched());
3048}
3049
3050static inline void __current_clr_polling(void)
3051{
3052 current_thread_info()->status &= ~TS_POLLING;
3053}
3054
3055static inline bool __must_check current_clr_polling_and_test(void)
3056{
3057 __current_clr_polling();
3058
3059
3060
3061
3062
3063 smp_mb();
3064
3065 return unlikely(tif_need_resched());
3066}
3067#elif defined(TIF_POLLING_NRFLAG)
3068static inline int tsk_is_polling(struct task_struct *p)
3069{
3070 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
3071}
3072
3073static inline void __current_set_polling(void)
3074{
3075 set_thread_flag(TIF_POLLING_NRFLAG);
3076}
3077
3078static inline bool __must_check current_set_polling_and_test(void)
3079{
3080 __current_set_polling();
3081
3082
3083
3084
3085
3086
3087
3088 smp_mb__after_clear_bit();
3089
3090 return unlikely(tif_need_resched());
3091}
3092
3093static inline void __current_clr_polling(void)
3094{
3095 clear_thread_flag(TIF_POLLING_NRFLAG);
3096}
3097
3098static inline bool __must_check current_clr_polling_and_test(void)
3099{
3100 __current_clr_polling();
3101
3102
3103
3104
3105
3106 smp_mb__after_clear_bit();
3107
3108 return unlikely(tif_need_resched());
3109}
3110
3111#else
3112static inline int tsk_is_polling(struct task_struct *p) { return 0; }
3113static inline void __current_set_polling(void) { }
3114static inline void __current_clr_polling(void) { }
3115
3116static inline bool __must_check current_set_polling_and_test(void)
3117{
3118 return unlikely(tif_need_resched());
3119}
3120static inline bool __must_check current_clr_polling_and_test(void)
3121{
3122 return unlikely(tif_need_resched());
3123}
3124#endif
3125
3126
3127
3128
3129void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
3130void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
3131
3132static inline void thread_group_cputime_init(struct signal_struct *sig)
3133{
3134 raw_spin_lock_init(&sig->cputimer.lock);
3135}
3136
3137
3138
3139
3140
3141
3142
3143extern void recalc_sigpending_and_wake(struct task_struct *t);
3144extern void recalc_sigpending(void);
3145
3146extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
3147
3148static inline void signal_wake_up(struct task_struct *t, bool resume)
3149{
3150 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
3151}
3152static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
3153{
3154 signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
3155}
3156
3157
3158
3159
3160#ifdef CONFIG_SMP
3161
3162static inline unsigned int task_cpu(const struct task_struct *p)
3163{
3164 return task_thread_info(p)->cpu;
3165}
3166
3167static inline int task_node(const struct task_struct *p)
3168{
3169 return cpu_to_node(task_cpu(p));
3170}
3171
3172extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
3173
3174#else
3175
3176static inline unsigned int task_cpu(const struct task_struct *p)
3177{
3178 return 0;
3179}
3180
3181static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
3182{
3183}
3184
3185#endif
3186
3187extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3188extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
3189
3190#ifdef CONFIG_CGROUP_SCHED
3191extern struct task_group root_task_group;
3192#endif
3193
3194extern int task_can_switch_user(struct user_struct *up,
3195 struct task_struct *tsk);
3196
3197#ifdef CONFIG_TASK_XACCT
3198static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3199{
3200 tsk->ioac.rchar += amt;
3201}
3202
3203static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3204{
3205 tsk->ioac.wchar += amt;
3206}
3207
3208static inline void inc_syscr(struct task_struct *tsk)
3209{
3210 tsk->ioac.syscr++;
3211}
3212
3213static inline void inc_syscw(struct task_struct *tsk)
3214{
3215 tsk->ioac.syscw++;
3216}
3217#else
3218static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3219{
3220}
3221
3222static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3223{
3224}
3225
3226static inline void inc_syscr(struct task_struct *tsk)
3227{
3228}
3229
3230static inline void inc_syscw(struct task_struct *tsk)
3231{
3232}
3233#endif
3234
3235#ifndef TASK_SIZE_OF
3236#define TASK_SIZE_OF(tsk) TASK_SIZE
3237#endif
3238
3239#ifdef CONFIG_MM_OWNER
3240extern void mm_update_next_owner(struct mm_struct *mm);
3241extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
3242#else
3243static inline void mm_update_next_owner(struct mm_struct *mm)
3244{
3245}
3246
3247static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
3248{
3249}
3250#endif
3251
3252static inline unsigned long task_rlimit(const struct task_struct *tsk,
3253 unsigned int limit)
3254{
3255 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
3256}
3257
3258static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3259 unsigned int limit)
3260{
3261 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
3262}
3263
3264static inline unsigned long rlimit(unsigned int limit)
3265{
3266 return task_rlimit(current, limit);
3267}
3268
3269static inline unsigned long rlimit_max(unsigned int limit)
3270{
3271 return task_rlimit_max(current, limit);
3272}
3273
3274#ifdef CONFIG_CPU_FREQ
3275struct update_util_data {
3276 void (*func)(struct update_util_data *data,
3277 u64 time, unsigned long util, unsigned long max);
3278};
3279
3280void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
3281#endif
3282
3283#endif
3284