/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
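
/*
 * Example usage (an illustrative sketch, not part of the original
 * file): a thread function that polls kthread_should_stop() and
 * returns once kthread_stop() is called; its return value is passed
 * back through kthread_stop().  my_loop and my_do_work are
 * hypothetical names.
 *
 *	static int my_loop(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */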

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the setup and call
 * kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
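
/*
 * Example usage (an illustrative sketch): the freezable variant of the
 * loop above.  The thread marks itself freezable first; on each pass
 * kthread_freezable_should_stop() enters the refrigerator if a freeze
 * is pending.  my_resync_state is a hypothetical helper for redoing
 * work invalidated while the thread was frozen.
 *
 *	static int my_freezable_loop(void *data)
 *	{
 *		bool frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&frozen)) {
 *			if (frozen)
 *				my_resync_state(data);
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */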

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * probe_kthread_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *probe_kthread_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}
	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
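
/*
 * Example usage (an illustrative sketch): create, name, start and
 * later stop a thread.  kthread_run() from <linux/kthread.h> wraps the
 * create-then-wake sequence shown here.  my_loop, my_data and id are
 * hypothetical.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_loop, my_data, -1, "my_loop/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 */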

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_NO_SETAFFINITY;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
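
/*
 * Example usage (an illustrative sketch): bind a just-created, still
 * stopped thread to a CPU before waking it.  kthread_create() is the
 * NUMA-agnostic wrapper from <linux/kthread.h>.
 *
 *	tsk = kthread_create(my_loop, my_data, "my_loop/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */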

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
}

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		ret = 0;
	}
	return ret;
}
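
/*
 * Example usage (an illustrative sketch): both halves of the park
 * protocol.  The thread function checks kthread_should_park() and
 * parks itself; a controlling thread toggles the parked state.
 *
 *	static int my_loop(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			my_do_work(data);
 *		}
 *		return 0;
 *	}
 *
 * and elsewhere:
 *
 *	kthread_park(tsk);	(thread now waits in TASK_PARKED)
 *	kthread_unpark(tsk);	(thread resumes my_loop())
 */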

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
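
/*
 * Example usage (an illustrative sketch): a worker with a dedicated
 * processing thread.  DEFINE_KTHREAD_WORKER(), KTHREAD_WORK_INIT() and
 * kthread_run() come from <linux/kthread.h>; my_work_fn is a
 * hypothetical callback taking the struct kthread_work pointer.
 *
 *	DEFINE_KTHREAD_WORKER(my_worker);
 *	struct kthread_work my_work = KTHREAD_WORK_INIT(my_work, my_work_fn);
 *
 *	tsk = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	queue_kthread_work(&my_worker, &my_work);
 *	flush_kthread_work(&my_work);
 *	kthread_stop(tsk);
 */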

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
			       struct kthread_work *work,
			       struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution by @worker's processing kthread.
 * Returns %true if @work was successfully queued, %false if it was
 * already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);