#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
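
/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param cannot be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * In addition to the policy, nice value and RT priority, sched_attr
 * carries the three parameters that describe a SCHED_DEADLINE task:
 * its runtime, (relative) deadline and period. @size is the size of
 * the structure as known to user space, so that the kernel can cope
 * with older/newer binaries (see SCHED_ATTR_SIZE_VER0 above).
 */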
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};

struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
struct nameidata;

#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
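
/*
 * These are the constant used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */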
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

extern void calc_global_load(unsigned long ticks);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void cpu_load_update_nohz_start(void);
extern void cpu_load_update_nohz_stop(void);
#else
static inline void cpu_load_update_nohz_start(void) { }
static inline void cpu_load_update_nohz_stop(void) { }
#endif

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif
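
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */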
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Build-time check: one state character per state bit. */
extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		(tsk)->state = (state_value);			\
	} while (0)
#define set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		smp_store_mb((tsk)->state, (state_value));	\
	} while (0)

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	smp_store_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ and _removing_ is on "private" lists
 * protected by the runqueue lock):
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif
extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL
 * for current task), SP is the stack pointer of the first frame that
 * should be shown in the back trace (or NULL if the entire call-chain of
 * the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern int sched_cpu_starting(unsigned int cpu);
extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
extern int sched_cpu_dying(unsigned int cpu);
#else
# define sched_cpu_dying	NULL
#endif

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern long io_schedule_timeout(long timeout);

static inline void io_schedule(void)
{
	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

void __noreturn do_task_dead(void);
struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
#define MMF_DUMP_DAX_PRIVATE	9
#define MMF_DUMP_DAX_SHARED	10

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	9
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* MADV_HUGEPAGE marked this mm */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_SKIP		21	/* mm is of no interest for the OOM killer */
#define MMF_UNSTABLE		22	/* mm is incoherent */
#define MMF_HUGE_ZERO_PAGE	23	/* mm has ever used the global huge zero page */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};
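
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */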
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	cputime_t utime;
	cputime_t stime;
	raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}
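
/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */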
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Initial preempt_count value; reflects the preempt_count schedule
 * invariant which states no preemption/lockdep/rcu are possible.
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * The preempt_count value of a freshly forked task: it matches the count
 * the child sees when it is first run by schedule_tail(), which then
 * drops it (see finish_task_switch()).
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
	bool running;
	bool checking_timer;
};

#include <linux/rwsem.h>
struct autogroup;
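
/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */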
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of schedule CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably. ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}
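
/*
 * Some day this will be a full-fledged user tracking system..
 */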
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */
	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
	atomic_long_t pipe_bufs;  /* how many pages are allocated in pipe buffers */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHED_INFO
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	u64 blkio_start;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	u64 freepages_start;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)

/*
 * Increase resolution of cpu_capacity calculations
 */
#define SCHED_CAPACITY_SHIFT	SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
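
/*
 * Wake-queues are lists of tasks with a pending wakeup, whose
 * callers have already marked the task as woken internally,
 * and can thus carry on. A common use case is being able to
 * do the wakeups once the corresponding user lock has been
 * released.
 *
 * We hold reference to each task in the list across the wakeup,
 * thus guaranteeing that the memory is still valid by the time
 * the actual wakeups are performed in wake_up_q().
 *
 * One per task suffices, because there's never a need for a task to be
 * in two wake queues simultaneously; it is forbidden to abandon a task
 * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
 * already in a wake queue, the wakeup will happen soon and the second
 * waker can just skip it.
 *
 * The WAKE_Q macro declares and initializes the list head.
 * wake_up_q() does NOT reinitialize the list; it's expected to be
 * called near the end of a function, where the fact that the queue is
 * not used again will be easy to see by inspection.
 *
 * Note that this can cause spurious wakeups. schedule() callers
 * must ensure the call is done inside a preemption disabled section,
 * so that a spurious wakeup does not cause harm.
 */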
struct wake_q_node {
	struct wake_q_node *next;
};

struct wake_q_head {
	struct wake_q_node *first;
	struct wake_q_node **lastp;
};

#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)

#define WAKE_Q(name)					\
	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }

extern void wake_q_add(struct wake_q_head *head,
		       struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY	0x0040	/* Groups have different max cpu capacities */
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu capacity */
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
};
struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

	u64 avg_scan_cost;		/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_domain_shared **__percpu sds;
	struct sched_group **__percpu sg;
	struct sched_group_capacity **__percpu sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);
extern void wake_up_if_idle(int cpu);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};
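
/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, they are the aggregated values of all runnable
 * and blocked sched_entities.
 *
 * N.B., the above ratios (runnable% and running%) themselves are in the
 * range of [0, 1]. To do fixed point arithmetics, we therefore scale them
 * up to as large a range as necessary, as reflected for example by
 * util_avg's SCHED_CAPACITY_SCALE.
 */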
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int			depth;
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg	avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to deadline inheritance.
	 * If so we are outside bandwidth enforcement mechanism (but only
	 * until we exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};
struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry detected.
	 */
	bool writable;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;

	/* per-thread vma caching */
	u32 vmacache_seqnum;
	struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	struct tlbflush_unmap_batch tlb_ubc;
#endif

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	unsigned	kcov_size;
	/* Buffer for coverage collection. */
	void		*kcov_area;
	/* kcov descriptor wired with this task or NULL. */
	struct kcov	*kcov;
#endif
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* number of pages to reclaim on returning to userland */
	unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long	task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference. */
	atomic_t stack_refcount;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/*
 * WARNING: on x86, 'thread_struct' contains a variable-sized
 * structure.  It *MUST* be at the end of 'task_struct'.
 *
 * Do not put anything below here!
 */
};

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

static inline int tsk_nr_cpus_allowed(struct task_struct *p)
{
	return p->nr_cpus_allowed;
}

#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02
#define TNF_SHARED	0x04
#define TNF_FAULT_LOCAL	0x08
#define TNF_MIGRATE_FAIL 0x10

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
				struct page *page, int src_nid, int dst_cpu)
{
	return true;
}
#endif

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}
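
/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */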
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;
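
/*
 * the helpers to get the task's different pids as they are seen
 * in various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */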
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
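
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */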
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}
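
/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */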
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

struct task_struct *task_rcu_dereference(struct task_struct **ptask);
struct task_struct *try_get_task_struct(struct task_struct **ptask);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 cputime_t *utime, cputime_t *stime);
extern void task_cputime_scaled(struct task_struct *t,
				cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				cputime_t *utime, cputime_t *stime)
{
	if (utime)
		*utime = t->utime;
	if (stime)
		*stime = t->stime;
}

static inline void task_cputime_scaled(struct task_struct *t,
				       cputime_t *utimescaled,
				       cputime_t *stimescaled)
{
	if (utimescaled)
		*utimescaled = t->utimescaled;
	if (stimescaled)
		*stimescaled = t->stimescaled;
}

static inline cputime_t task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);

/*
 * Per process flags
 */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000	/* this thread called freeze_processes and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
 * __GFP_FS is also cleared as it implies __GFP_IO.
 */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE  1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB  2	/* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING  3	/* Lowmemorykiller is waiting */


#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)

/*
 * task->jobctl flags
 */
#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */

#define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1UL << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
				    unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
				      unsigned long mask);

static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}

static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
				     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
			   const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
				const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				       const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
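
/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, use (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */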
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
/*
 * An architecture that selects CONFIG_HAVE_UNSTABLE_SCHED_CLOCK may
 * still end up with a sched_clock() that is only stable for part of
 * boot, or not at all; these hooks let the clock code track that and
 * react to it.
 */
extern int sched_clock_stable(void);
extern void set_sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

/*
 * As outlined in clock.c, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing exec to inc stats */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif

#ifdef CONFIG_NO_HZ_FULL
extern u64 scheduler_tick_max_deferment(void);
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
extern void sched_autogroup_exit_task(struct task_struct *p);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
static inline void sched_autogroup_exit_task(struct task_struct *p) { }
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
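
/*
 * Illustrative sketch (not from this header): get_uid()/free_uid() form a
 * reference-count pair, so a caller that stashes a user_struct beyond the
 * current context must pin it first.  "u" is hypothetical:
 *
 *	struct user_struct *u = get_uid(some_user_struct);
 *
 *	... u stays valid here even after the owning task exits ...
 *	free_uid(u);
 */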

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
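
/*
 * Illustrative sketch of the canonical sleep/wakeup pairing these helpers
 * implement ("waiter" and "data_ready" are hypothetical).  The sleeper
 * publishes its state before re-checking the condition, so a concurrent
 * wake_up_process() cannot be lost:
 *
 *	sleeper:				waker:
 *	set_current_state(TASK_INTERRUPTIBLE);	data_ready = true;
 *	if (!data_ready)			wake_up_process(waiter);
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 */
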
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int kernel_dequeue_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	siginfo_t __info;
	int ret;

	spin_lock_irq(&tsk->sighand->siglock);
	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors.  These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag.  For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce.  We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else	/* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/* True if we are on the alternate signal stack.  */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* same as above but performs the slow path from the async context. Can
 * be called from the atomic context as well
 */
extern void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
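
/*
 * Illustrative sketch: pinning a foreign task's mm.  get_task_mm() bumps
 * mm_users and can return NULL for kernel threads or exiting tasks, so
 * both the NULL check and the matching mmput() are required ("task" is a
 * hypothetical caller-provided pointer):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		... e.g. walk mm->mmap under down_read(&mm->mmap_sem) ...
 *		mmput(mm);
 *	}
 */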

/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);

/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
		unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern int do_execve(struct filename *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern int do_execveat(int, struct filename *,
		       const char __user * const __user *,
		       const char __user * const __user *,
		       int);
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
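
/*
 * Illustrative sketch: ->comm can change at any time (e.g. via exec or
 * prctl), so snapshot it through get_task_comm() rather than reading the
 * field directly ("task" is a hypothetical pointer):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, task);
 *	pr_debug("acting on behalf of %s\n", comm);
 */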

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)	\
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t)		\
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)
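
/*
 * Illustrative sketch: the tasklist is RCU protected, so a reader that
 * neither sleeps nor keeps references can walk every thread like this:
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		... inspect t; no sleeping, no stale pointers kept ...
 *	rcu_read_unlock();
 */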

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/*
 * Do to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
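
/*
 * Illustrative sketch: the fields listed above stay stable only while
 * alloc_lock is held ("p" is a hypothetical task pointer):
 *
 *	task_lock(p);
 *	... p->fs, p->files, p->comm may be read or updated safely ...
 *	task_unlock(p);
 */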

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
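
/*
 * Illustrative sketch: unlike a plain spin_lock_irqsave(), the sighand
 * lock can disappear while a task is being released, so the return value
 * must be tested ("p" is a hypothetical task pointer):
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand->siglock held, interrupts disabled ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */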

/**
 * threadgroup_change_begin - mark the beginning of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * All operations which modify a threadgroup - a new thread joining the
 * group, death of a member thread (the assertion of PF_EXITING) and
 * exec(2) dethreading the process and replacing the leader - are wrapped
 * by threadgroup_change_{begin|end}().  This is to provide a place which
 * subsystems needing threadgroup stability can hook into for
 * synchronization.
 */
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
	cgroup_threadgroup_change_begin(tsk);
}

/**
 * threadgroup_change_end - mark the end of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * See threadgroup_change_begin().
 */
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	cgroup_threadgroup_change_end(tsk);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK

static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead.  task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}

#define setup_thread_stack(new,old)	do { } while(0)

static inline unsigned long *end_of_stack(const struct task_struct *task)
{
	return task->stack;
}

#elif !defined(__HAVE_THREAD_FUNCTIONS)

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((void *)(task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * The thread info struct itself must not be at the end of the stack.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return atomic_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif
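
/*
 * Illustrative sketch: with CONFIG_THREAD_INFO_IN_TASK the stack can be
 * freed before the task_struct, so remote stack walkers must take a
 * reference first ("tsk" is a hypothetical pointer):
 *
 *	void *stack = try_get_task_stack(tsk);
 *
 *	if (stack) {
 *		... safe to examine tsk's stack pages ...
 *		put_task_stack(tsk);
 *	}
 */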

#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_stack_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do { 	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}
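
/*
 * Illustrative sketch: a long-running operation that aborts cleanly when
 * its caller is being killed ("work_left" and "do_one_chunk" are
 * hypothetical):
 *
 *	while (work_left()) {
 *		if (fatal_signal_pending(current))
 *			return -EINTR;
 *		do_one_chunk();
 *	}
 */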

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
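
/*
 * Illustrative sketch: sprinkling a voluntary preemption point into a
 * loop over many items keeps latency bounded on !CONFIG_PREEMPT kernels
 * ("process_item" and "nr_items" are hypothetical):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */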

static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
{
#ifdef CONFIG_DEBUG_PREEMPT
	return p->preempt_disable_ip;
#else
	return 0;
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit;
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * Reevaluate whether the task has signals pending delivery.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
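
/*
 * Illustrative sketch: enforcing the current task's file-descriptor
 * limit ("requested_fds" is hypothetical):
 *
 *	if (requested_fds > rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */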

#define SCHED_CPUFREQ_RT	(1U << 0)
#define SCHED_CPUFREQ_DL	(1U << 1)
#define SCHED_CPUFREQ_IOWAIT	(1U << 2)

#define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)

#ifdef CONFIG_CPU_FREQ
struct update_util_data {
	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};

void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
			void (*func)(struct update_util_data *data, u64 time,
				     unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
#endif
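
/*
 * Illustrative sketch of the hook above, as a cpufreq governor might use
 * it ("my_data" and "my_update" are hypothetical).  The callback is
 * invoked from scheduler context, so it must not sleep:
 *
 *	static void my_update(struct update_util_data *data, u64 time,
 *			      unsigned int flags)
 *	{
 *		if (flags & SCHED_CPUFREQ_IOWAIT)
 *			... request a frequency bump ...
 *	}
 *
 *	static DEFINE_PER_CPU(struct update_util_data, my_data);
 *
 *	cpufreq_add_update_util_hook(cpu, &per_cpu(my_data, cpu), my_update);
 */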

#endif