/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	int should_stop;
	void *data;
	struct completion exited;
};

#define to_kthread(tsk)	\
	container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
	return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);
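
/*
 * Usage sketch (illustrative only, not part of this file): the typical
 * thread-function shape that cooperates with kthread_stop().  The thread
 * sleeps in one-second intervals until kthread_stop() wakes it, and its
 * return value is handed back to kthread_stop().  The names my_threadfn
 * and my_data are placeholders for this example.
 *
 *	static int my_threadfn(void *my_data)
 *	{
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 */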

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.should_stop = 0;
	self.data = data;
	init_completion(&self.exited);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;
	if (!self.should_stop)
		ret = threadfn(data);

	/* we can't just return, we have to preserve the stack. */
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * This helper function creates and names a kernel thread.  The thread
 * will be stopped: use wake_up_process() to start it.  See also
 * kthread_run().
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give -1.
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when kthread_should_stop() is true (which means kthread_stop()
 * has been called).  The return value should be zero or a negative
 * error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
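
/*
 * Usage sketch (illustrative only, not part of this file): create a named
 * thread with no NUMA preference (-1), start it with wake_up_process(),
 * and stop it later; kthread_stop() returns whatever my_threadfn()
 * returned.  my_threadfn, my_data and err are placeholder names.
 *
 *	struct task_struct *tsk;
 *	int err;
 *
 *	tsk = kthread_create_on_node(my_threadfn, my_data, -1, "my_thread");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *
 *	err = kthread_stop(tsk);
 */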

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The thread will run the work_list until it is
 * stopped with kthread_stop().
 *
 * The works are not allowed to keep any locks, disable preemption or
 * irqs while processing works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
		work->done_seq = work->queue_seq;
		smp_mb();	/* mb worker-b1 paired with flush-b0 */
		if (atomic_read(&work->flushing))
			wake_up_all(&work->done);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  Returns
 * %true if @work was successfully queued, %false if it was already
 * pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &worker->work_list);
		work->queue_seq++;
		if (likely(worker->task))
			wake_up_process(worker->task);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	int seq = work->queue_seq;

	atomic_inc(&work->flushing);

	/*
	 * mb flush-b0 paired with worker-b1, to make sure either
	 * worker sees the above increment or we see done_seq update.
	 */
	smp_mb__after_atomic_inc();

	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
	wait_event(work->done, seq - work->done_seq <= 0);
	atomic_dec(&work->flushing);

	/*
	 * rmb flush-b1 paired with worker-b0, to make sure our caller
	 * sees every change made by work->func().
	 */
	smp_mb__after_atomic_dec();
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
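
/*
 * Worker usage sketch (illustrative only, not part of this file): run a
 * dedicated worker thread on top of kthread_worker_fn(), queue one work
 * item and flush it.  It assumes the DEFINE_KTHREAD_WORKER() and
 * DEFINE_KTHREAD_WORK() helpers and the kthread_run() macro from
 * <linux/kthread.h>; my_work_fn and the variable names are placeholders.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		pr_info("my_work_fn ran\n");
 *	}
 *
 *	static DEFINE_KTHREAD_WORKER(my_worker);
 *	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *	static struct task_struct *my_task;
 *
 *	my_task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	queue_kthread_work(&my_worker, &my_work);
 *	flush_kthread_work(&my_work);
 *	kthread_stop(my_task);
 */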