#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
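/*
 * CLONE_KERNEL is the traditional clone-flag set for spawning in-kernel
 * threads: the child shares the parent's fs context, file table and
 * signal handlers.  Illustrative sketch only; my_worker() below is a
 * hypothetical helper, not part of this header:
 *
 *	static int my_worker(void *unused)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(my_worker, NULL, CLONE_KERNEL);
 */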

extern unsigned long avenrun[];
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11
#define FIXED_1		(1<<FSHIFT)
#define LOAD_FREQ	(5*HZ+1)
#define EXP_1		1884
#define EXP_5		2014
#define EXP_15		2037

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
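/*
 * CALC_LOAD updates a fixed-point exponentially-decaying load average
 * (FSHIFT = 11, so FIXED_1 = 2048 represents 1.0).  Worked example,
 * assuming the 1-minute decay factor EXP_1 = 1884 and no runnable
 * tasks (n = 0):
 *
 *	load = 2048;			// 1.00 in fixed point
 *	CALC_LOAD(load, EXP_1, 0);	// (2048*1884) >> 11 = 1884
 *					// i.e. ~0.92 after one interval
 *
 * Note the macro expands to three bare statements, so it must not be
 * used as the body of an unbraced if/else.
 */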

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);

extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

struct task_migration_notifier {
	struct task_struct *task;
	int from_cpu;
	int to_cpu;
};
extern void register_task_migration_notifier(struct notifier_block *n);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8

#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32

#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_STATE_MAX		1024

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
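/*
 * The compile-time assertion above keeps the state-to-character string
 * in sync with the state bits: TASK_STATE_MAX is 1024, so
 * ilog2(1024) + 1 = 11, which must equal the string length of
 * "RSDTtZXxKWP" (11 characters, one per state/exit bit in order).
 */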

#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task)	((task)->exit_state != 0)
#define task_is_stopped_or_traced(task)	\
	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
	 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
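/*
 * set_current_state() includes a memory barrier (via set_mb()) so that
 * a waker testing the wakeup condition cannot observe the state change
 * without also observing prior writes; __set_current_state() is the
 * unordered variant for callers holding the relevant lock.  The
 * canonical sleep pattern, sketched for illustration:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */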

#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#define __sched		__attribute__((__section__(".sched.text")))

extern char __sched_text_start[], __sched_text_end[];

extern int in_sched_functions(unsigned long addr);
#define MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

#define MMF_DUMPABLE		0
#define MMF_DUMP_SECURELY	1

#define MMF_DUMPABLE_BITS	2
#define MMF_DUMPABLE_MASK	((1 << MMF_DUMPABLE_BITS) - 1)

#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED	8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif

#define MMF_VM_MERGEABLE	16
#define MMF_VM_HUGEPAGE		17
#define MMF_EXE_FILE_CHANGED	18

#define MMF_HAS_UPROBES		19
#define MMF_RECALC_UPROBES	20

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
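/*
 * MMF_DUMP_MASK_DEFAULT_ELF is expanded where MMF_DUMP_FILTER_DEFAULT
 * is used rather than where it is defined, so the #ifdef block may
 * legitimately follow it.  Worked out numerically, the default filter
 * is
 *
 *	(1 << 2) | (1 << 3) | (1 << 7) = 0x8c
 *
 * plus (1 << 6) = 0x40 when CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is
 * set, giving 0xcc: anonymous private/shared and hugetlb-private
 * mappings (and optionally ELF headers) are dumped by default.
 */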

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

struct cputime {
	cputime_t utime;
	cputime_t stime;
};

struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {				\
		.utime = 0,				\
		.stime = 0,				\
		.sum_exec_runtime = 0,			\
	}

#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
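/*
 * INIT_PREEMPT_COUNT disables preemption until the scheduler is up:
 * the initial count is one (preemption off) plus PREEMPT_ACTIVE, the
 * latter keeping cond_resched() inert before the scheduler is running.
 * It is reset by start_kernel()->sched_init()->init_idle().
 */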

struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;

	struct task_struct	*curr_target;

	struct sigpending	shared_pending;

	int			group_exit_code;

	int			notify_count;
	struct task_struct	*group_exit_task;

	int			group_stop_count;
	unsigned int		flags;

	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	int			posix_timer_id;
	struct list_head	posix_timers;

	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	struct cpu_itimer it[2];

	struct thread_group_cputimer cputimer;

	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	int leader;

	struct tty_struct *tty;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	unsigned long long sum_sched_runtime;

	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	unsigned audit_tty_log_passwd;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	struct rw_semaphore group_rwsem;
#endif

	oom_flags_t oom_flags;
	short oom_score_adj;
	short oom_score_adj_min;

	struct mutex cred_guard_mutex;
};

#define SIGNAL_STOP_STOPPED	0x00000001
#define SIGNAL_STOP_CONTINUED	0x00000002
#define SIGNAL_GROUP_EXIT	0x00000004
#define SIGNAL_GROUP_COREDUMP	0x00000008

#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040

static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

struct user_struct {
	atomic_t __count;
	atomic_t processes;
	atomic_t files;
	atomic_t sigpending;
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches;
	atomic_t inotify_devs;
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches;
#endif
#ifdef CONFIG_POSIX_MQUEUE
	unsigned long mq_bytes;
#endif
	unsigned long locked_shm;

#ifdef CONFIG_KEYS
	struct key *uid_keyring;
	struct key *session_keyring;
#endif

	struct hlist_node uidhash_node;
	kuid_t uid;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	unsigned long pcount;
	unsigned long long run_delay;

	unsigned long long last_arrival,
			   last_queued;
};
#endif

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;

	struct timespec blkio_start, blkio_end;
	u64 blkio_delay;
	u64 swapin_delay;
	u32 blkio_count;
	u32 swapin_count;

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;
	u32 freepages_count;
};
#endif

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001
#define SD_BALANCE_NEWIDLE	0x0002
#define SD_BALANCE_EXEC		0x0004
#define SD_BALANCE_FORK		0x0008
#define SD_BALANCE_WAKE		0x0010
#define SD_WAKE_AFFINE		0x0020
#define SD_SHARE_CPUPOWER	0x0080
#define SD_SHARE_PKG_RESOURCES	0x0200
#define SD_SERIALIZE		0x0400
#define SD_ASYM_PACKING		0x0800
#define SD_PREFER_SIBLING	0x1000
#define SD_OVERLAP		0x2000

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	struct sched_domain *parent;
	struct sched_domain *child;
	struct sched_group *groups;
	unsigned long min_interval;
	unsigned long max_interval;
	unsigned int busy_factor;
	unsigned int imbalance_pct;
	unsigned int cache_nice_tries;
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;
	int flags;
	int level;

	unsigned long last_balance;
	unsigned int balance_interval;
	unsigned int nr_balance_failed;

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;
		struct rcu_head rcu;
	};

	unsigned int span_weight;

	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

#else

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif

struct io_context;

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight, inv_weight;
};

struct sched_avg {
	u32 runnable_avg_sum, runnable_avg_period;
	u64 last_runnable_update;
	s64 decay_count;
	unsigned long load_avg_contrib;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight load;
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity *parent;
	struct cfs_rq *cfs_rq;
	struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
	struct sched_avg avg;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	struct rt_rq *rt_rq;
	struct rt_rq *my_q;
#endif
};

struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;
	void *stack;
	atomic_t usage;
	unsigned int flags;
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct hlist_head preempt_notifiers;
#endif

	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat rss_stat;
#endif

	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;
	unsigned int jobctl;

	unsigned int personality;

	unsigned did_exec:1;
	unsigned in_execve:1;
	unsigned in_iowait:1;

	unsigned no_new_privs:1;

	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;
#endif

	struct task_struct __rcu *real_parent;
	struct task_struct __rcu *parent;

	struct list_head children;
	struct list_head sibling;
	struct task_struct *group_leader;

	struct list_head ptraced;
	struct list_head ptrace_entry;

	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;
	int __user *set_child_tid;
	int __user *clear_child_tid;

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_t vtime_seqlock;
	unsigned long long vtime_snap;
	enum {
		VTIME_SLEEPING = 0,
		VTIME_USER,
		VTIME_SYS,
	} vtime_snap_whence;
#endif
	unsigned long nvcsw, nivcsw;
	struct timespec start_time;
	struct timespec real_start_time;

	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

	const struct cred __rcu *real_cred;
	const struct cred __rcu *cred;

	char comm[TASK_COMM_LEN];

	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long last_switch_count;
#endif

	struct thread_struct thread;

	struct fs_struct *fs;

	struct files_struct *files;

	struct nsproxy *nsproxy;

	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	u32 parent_exec_id;
	u32 self_exec_id;

	spinlock_t alloc_lock;

	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	struct plist_head pi_waiters;
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

	void *journal_info;

	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
	struct blk_plug *plug;
#endif

	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;
	u64 acct_vm_mem1;
	cputime_t acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;
	seqcount_t mems_allowed_seq;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	struct css_set __rcu *cgroups;
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	int numa_migrate_seq;
	unsigned int numa_scan_period;
	u64 node_stamp;
	struct callback_head numa_work;
#endif

	struct rcu_head rcu;

	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif

	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif

	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_ret_stack;
	struct ftrace_ret_stack *ret_stack;
	unsigned long long ftrace_timestamp;
	atomic_t trace_overrun;
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	unsigned long trace;
	unsigned long trace_recursion;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_batch_info {
		int do_batch;
		struct mem_cgroup *memcg;
		unsigned long nr_pages;
		unsigned long memsw_nr_pages;
	} memcg_batch;
	unsigned int memcg_kmem_skip_account;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int sequential_io;
	unsigned int sequential_io_avg;
#endif
};

#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int node, int pages, bool migrated);
extern void set_numabalancing_state(bool enabled);
#else
static inline void task_numa_fault(int node, int pages, bool migrated)
{
}
static inline void set_numabalancing_state(bool enabled)
{
}
#endif

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
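/*
 * get_task_struct()/put_task_struct() implement plain reference
 * counting on the task structure; the final put frees it.  Typical
 * (illustrative) pattern for holding a task across a sleeping
 * operation:
 *
 *	get_task_struct(tsk);
 *	...use tsk, possibly sleeping...
 *	put_task_struct(tsk);
 */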

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 cputime_t *utime, cputime_t *stime);
extern void task_cputime_scaled(struct task_struct *t,
				cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				cputime_t *utime, cputime_t *stime)
{
	if (utime)
		*utime = t->utime;
	if (stime)
		*stime = t->stime;
}

static inline void task_cputime_scaled(struct task_struct *t,
				       cputime_t *utimescaled,
				       cputime_t *stimescaled)
{
	if (utimescaled)
		*utimescaled = t->utimescaled;
	if (stimescaled)
		*stimescaled = t->stimescaled;
}

static inline cputime_t task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);

#define PF_EXITING	0x00000004
#define PF_EXITPIDONE	0x00000008
#define PF_VCPU		0x00000010
#define PF_WQ_WORKER	0x00000020
#define PF_FORKNOEXEC	0x00000040
#define PF_MCE_PROCESS	0x00000080
#define PF_SUPERPRIV	0x00000100
#define PF_DUMPCORE	0x00000200
#define PF_SIGNALED	0x00000400
#define PF_MEMALLOC	0x00000800
#define PF_NPROC_EXCEEDED 0x00001000
#define PF_USED_MATH	0x00002000
#define PF_USED_ASYNC	0x00004000
#define PF_NOFREEZE	0x00008000
#define PF_FROZEN	0x00010000
#define PF_FSTRANS	0x00020000
#define PF_KSWAPD	0x00040000
#define PF_MEMALLOC_NOIO 0x00080000
#define PF_LESS_THROTTLE 0x00100000
#define PF_KTHREAD	0x00200000
#define PF_RANDOMIZE	0x00400000
#define PF_SWAPWRITE	0x00800000
#define PF_SPREAD_PAGE	0x01000000
#define PF_SPREAD_SLAB	0x02000000
#define PF_NO_SETAFFINITY 0x04000000
#define PF_MCE_EARLY	0x08000000
#define PF_MEMPOLICY	0x10000000
#define PF_MUTEX_TESTER	0x20000000
#define PF_FREEZER_SKIP	0x40000000
#define PF_SUSPEND_TASK	0x80000000

#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~__GFP_IO;
	return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
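/*
 * memalloc_noio_save()/restore() nest safely because save returns the
 * previous PF_MEMALLOC_NOIO state and restore puts exactly that bit
 * back.  Illustrative use around a section whose allocations must not
 * recurse into I/O:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	...allocations here have __GFP_IO stripped via
 *	   memalloc_noio_flags()...
 *	memalloc_noio_restore(noio_flags);
 */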

#define JOBCTL_STOP_SIGMASK	0xffff

#define JOBCTL_STOP_DEQUEUED_BIT 16
#define JOBCTL_STOP_PENDING_BIT	17
#define JOBCTL_STOP_CONSUME_BIT	18
#define JOBCTL_TRAP_STOP_BIT	19
#define JOBCTL_TRAP_NOTIFY_BIT	20
#define JOBCTL_TRAPPING_BIT	21
#define JOBCTL_LISTENING_BIT	22

#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
				    unsigned int mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
				      unsigned int mask);

#ifdef CONFIG_PREEMPT_RCU

#define RCU_READ_UNLOCK_BLOCKED (1 << 0)
#define RCU_READ_UNLOCK_NEED_QS (1 << 1)

static inline void rcu_copy_process(struct task_struct *p)
{
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special = 0;
#ifdef CONFIG_TREE_PREEMPT_RCU
	p->rcu_blocked_node = NULL;
#endif
#ifdef CONFIG_RCU_BOOST
	p->rcu_boost_mutex = NULL;
#endif
	INIT_LIST_HEAD(&p->rcu_node_entry);
}

#else

static inline void rcu_copy_process(struct task_struct *p)
{
}

#endif

static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}
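/*
 * tsk_restore_flags() restores only the bits named in @flags from a
 * previously saved copy, leaving every other flag untouched.  A common
 * (illustrative) use is temporarily granting PF_MEMALLOC:
 *
 *	unsigned long pflags = current->flags;
 *	current->flags |= PF_MEMALLOC;
 *	...perform allocation-sensitive work...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */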

#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
				const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				       const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif

#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}
#endif

extern unsigned long long notrace sched_clock(void);

extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
extern int sched_clock_stable;

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(void);
extern u64 scheduler_tick_max_deferment(void);
#else
static inline bool sched_can_stop_tick(void) { return false; }
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
#endif

extern bool yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern struct task_struct *idle_task(int cpu);

static inline bool is_idle_task(const struct task_struct *p)
{
	return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

extern struct exec_domain default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

extern struct mm_struct *mm_alloc(void);

extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

extern void mmput(struct mm_struct *);

extern struct mm_struct *get_task_mm(struct task_struct *task);
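/*
 * get_task_mm() takes a reference on the task's address space (and
 * returns NULL for kernel threads); every successful call must be
 * paired with mmput().  Illustrative pattern:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		...inspect the address space...
 *		mmput(mm);
 *	}
 */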

extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);

extern void mm_release(struct task_struct *, struct mm_struct *);

extern struct mm_struct *dup_mm(struct task_struct *tsk);

extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(const char *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)
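/*
 * The iterators above must run under tasklist_lock or rcu_read_lock()
 * so the task lists stay stable.  Illustrative traversal of every
 * thread in the system:
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		...examine t...
 *	} while_each_thread(g, t);
 *	read_unlock(&tasklist_lock);
 */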

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline int has_group_leader_pid(struct task_struct *p)
{
	return p->pid == p->tgid;
}

static inline
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->tgid == p2->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
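/*
 * lock_task_sighand() returns NULL once the task's sighand has been
 * detached during exit, so the result must always be checked before
 * the matching unlock.  Illustrative pattern:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		...tsk->sighand and signal state are stable here...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */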

#ifdef CONFIG_CGROUPS
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	down_read(&tsk->signal->group_rwsem);
}
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	up_read(&tsk->signal->group_rwsem);
}

static inline void threadgroup_lock(struct task_struct *tsk)
{
	down_write(&tsk->signal->group_rwsem);
}

static inline void threadgroup_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->group_rwsem);
}
#else
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
static inline void threadgroup_change_end(struct task_struct *tsk) {}
static inline void threadgroup_lock(struct task_struct *tsk) {}
static inline void threadgroup_unlock(struct task_struct *tsk) {}
#endif

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})
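/*
 * cond_resched() yields the CPU at a safe point if rescheduling is
 * due, and is the idiomatic way to keep long kernel-side loops
 * preemption friendly even on non-preemptible kernels.  Illustrative
 * use (process_item() is a hypothetical helper):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */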

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

#ifdef TS_POLLING
static inline int tsk_is_polling(struct task_struct *p)
{
	return task_thread_info(p)->status & TS_POLLING;
}
static inline void current_set_polling(void)
{
	current_thread_info()->status |= TS_POLLING;
}

static inline void current_clr_polling(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	smp_mb__after_clear_bit();
}
#elif defined(TIF_POLLING_NRFLAG)
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}
static inline void current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline void current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}
#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void current_set_polling(void) { }
static inline void current_clr_polling(void) { }
#endif

void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	raw_spin_lock_init(&sig->cputimer.lock);
}

extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
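/*
 * rlimit()/rlimit_max() read the current task's soft and hard resource
 * limits with ACCESS_ONCE(), so no lock is needed for a single sample.
 * Illustrative check against the soft file-descriptor limit:
 *
 *	if (nr_open_files >= rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */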

#endif