/*
 * Kernel thread helper functions.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
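
/*
 * Example (illustrative sketch, not part of the original file): a
 * typical kthread main loop uses kthread_should_stop() as its exit
 * condition; the value it returns is handed back to kthread_stop().
 * example_thread_fn and do_one_unit_of_work() are hypothetical names.
 *
 *	static int example_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_one_unit_of_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */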

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
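
/*
 * Example (illustrative sketch, not part of the original file): a
 * freezable kthread marks itself with set_freezable() and then polls
 * kthread_freezable_should_stop() instead of kthread_should_stop(),
 * so it enters the refrigerator across suspend without deadlocking
 * against kthread_stop().  example_freezable_fn is a hypothetical name.
 *
 *	static int example_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (was_frozen)
 *				continue;
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */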

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * probe_kthread_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * @task not to be freed during function execution.
 */
void *probe_kthread_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}
157
158static void __kthread_parkme(struct kthread *self)
159{
160 __set_current_state(TASK_PARKED);
161 while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
162 if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
163 complete(&self->parked);
164 schedule();
165 __set_current_state(TASK_PARKED);
166 }
167 clear_bit(KTHREAD_IS_PARKED, &self->flags);
168 __set_current_state(TASK_RUNNING);
169}
170
171void kthread_parkme(void)
172{
173 __kthread_parkme(to_kthread(current));
174}
175EXPORT_SYMBOL_GPL(kthread_parkme);
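
/*
 * Example (illustrative sketch, not part of the original file): the
 * thread side of the park protocol.  A loop that supports parking
 * checks kthread_should_park() and calls kthread_parkme(), which
 * blocks in TASK_PARKED until kthread_unpark() is called.
 * example_parkable_fn is a hypothetical name.
 *
 *	static int example_parkable_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */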

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when kthread_should_stop() is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
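
/*
 * Example (illustrative sketch, not part of the original file): the
 * newly created thread comes back stopped, so the caller wakes it
 * explicitly (or uses the kthread_run() wrapper, which does both).
 * example_thread_fn and example_data are hypothetical names.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(example_thread_fn, example_data,
 *				     NUMA_NO_NODE, "example/%d", 0);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 */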

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_NO_SETAFFINITY;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
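
/*
 * Example (illustrative sketch, not part of the original file):
 * kthread_bind() must be called on a thread that has not run yet,
 * i.e. between kthread_create() and the first wake_up_process().
 * example_thread_fn is a hypothetical name.
 *
 *	tsk = kthread_create(example_thread_fn, NULL, "example/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */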

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread.  The format is expected
 *	     to take the cpu number as its %u argument.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code.  So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself, just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_park);
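
/*
 * Example (illustrative sketch, not part of the original file): the
 * controller side of the park protocol.  A thread whose loop calls
 * kthread_parkme() (see kthread_parkme() above) can be taken out of
 * service and resumed later without being torn down.
 * reconfigure_shared_state() is a hypothetical helper.
 *
 *	kthread_park(tsk);		tsk now waits in TASK_PARKED
 *	reconfigure_shared_state();	exclusive work while tsk is parked
 *	kthread_unpark(tsk);		tsk resumes its loop
 */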

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
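
/*
 * Example (illustrative sketch, not part of the original file):
 * tearing down a thread started earlier.  kthread_stop() wakes the
 * thread, waits for its thread function to return, and returns that
 * function's exit code (or -EINTR if the thread was never woken).
 *
 *	ret = kthread_stop(tsk);
 *	if (ret && ret != -EINTR)
 *		pr_warn("example thread exited with %d\n", ret);
 */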

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
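
/*
 * Example (illustrative sketch, not part of the original file):
 * running a dedicated worker thread.  A kthread_worker is defined and
 * a kthread is started on kthread_worker_fn() to process whatever is
 * queued on it.  example_worker is a hypothetical name.
 *
 *	static DEFINE_KTHREAD_WORKER(example_worker);
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_run(kthread_worker_fn, &example_worker, "example_wq");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 */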

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to be processed by @worker.  Returns %true if @work was
 * successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
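
/*
 * Example (illustrative sketch, not part of the original file):
 * queueing a work item on the hypothetical example_worker above and
 * waiting for it to finish.  A kthread_work may only be queued once
 * at a time; queue_kthread_work() returns false while it is pending.
 *
 *	static void example_work_fn(struct kthread_work *work)
 *	{
 *		pr_info("example work ran\n");
 *	}
 *	static DEFINE_KTHREAD_WORK(example_work, example_work_fn);
 *
 *	queue_kthread_work(&example_worker, &example_work);
 *	flush_kthread_work(&example_work);
 */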

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);