/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/sched.h>

struct task_struct;
struct rusage;
union thread_union;

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

extern union thread_union init_thread_union;
extern struct task_struct init_task;
#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);

extern void proc_caches_init(void);

extern void release_task(struct task_struct *p);
#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
                        struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
                        struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
                unsigned long clone_flags, unsigned long sp, unsigned long arg,
                struct task_struct *p, unsigned long tls)
{
        return copy_thread(clone_flags, sp, arg, p);
}
#endif
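
/*
 * Illustrative call path (a sketch, not a definitive contract): during
 * fork, copy_process() in kernel/fork.c ends up calling copy_thread_tls()
 * to set up the child's register and TLS state:
 *
 *      _do_fork()
 *        copy_process()
 *          copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
 */
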
extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
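
/*
 * Example (illustrative only): kernel_thread() runs fn(arg) in a new
 * kernel thread; most in-tree code should prefer the kthread_run()
 * helpers from <linux/kthread.h> instead of calling this directly.
 * my_worker below is a hypothetical callback, not a real kernel symbol:
 *
 *      static int my_worker(void *data)
 *      {
 *              ...
 *              return 0;
 *      }
 *
 *      pid_t pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 */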

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

static inline struct task_struct *get_task_struct(struct task_struct *t)
{
        refcount_inc(&t->usage);
        return t;
}

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
        if (refcount_dec_and_test(&t->usage))
                __put_task_struct(t);
}
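
/*
 * Example (illustrative sketch): a caller that needs a task reference to
 * outlive an RCU read-side lookup pins the task with get_task_struct()
 * and later balances it with put_task_struct():
 *
 *      rcu_read_lock();
 *      t = find_task_by_vpid(pid);     // lookup only valid under RCU
 *      if (t)
 *              get_task_struct(t);     // pin t past rcu_read_unlock()
 *      rcu_read_unlock();
 *
 *      ... use t ...
 *
 *      if (t)
 *              put_task_struct(t);     // may free t via __put_task_struct()
 */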

void put_task_struct_rcu_user(struct task_struct *task);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
 * If an architecture has not declared a thread_struct whitelist we
 * must assume something there may need to be copied to userspace.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        *offset = 0;
        /* Handle dynamically sized thread_struct. */
        *size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif
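
/*
 * Example (hypothetical arch code, for illustration only): an architecture
 * selecting CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST supplies its own
 * version, typically exposing only the part of thread_struct that is
 * legitimately copied to userspace, e.g. the FPU state:
 *
 *      static inline void arch_thread_struct_whitelist(unsigned long *offset,
 *                                                      unsigned long *size)
 *      {
 *              *offset = offsetof(struct thread_struct, fpu);
 *              *size   = sizeof_field(struct thread_struct, fpu);
 *      }
 */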

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
        return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
        return NULL;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
        spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
        spin_unlock(&p->alloc_lock);
}
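
/*
 * Example (illustrative sketch): task_lock() is what makes reading
 * another task's ->comm stable against a concurrent set_task_comm():
 *
 *      char comm[TASK_COMM_LEN];
 *
 *      task_lock(p);
 *      strncpy(comm, p->comm, TASK_COMM_LEN);
 *      task_unlock(p);
 *
 * (In-tree code normally uses the get_task_comm() helper, which wraps
 * this pattern.)
 */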

#endif /* _LINUX_SCHED_TASK_H */