/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Geert Uytterhoeven.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid adding a new member and because
	 * it can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared by
	 * to_kthread(). This also means that the memory pointed at by
	 * ->set_child_tid can be safely freed.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kfree(to_kthread(k));
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the work of parking
 * and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a parked position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from the
 * kthread_stop() / freezer deadlock, and freezable kthreads should use
 * this function instead of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
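
/*
 * Example: main loop of a freezable kthread.  This is an illustrative
 * sketch, not part of the original file; my_freezable_thread() and
 * do_one_item() are hypothetical caller-side names.
 *
 *	static int my_freezable_thread(void *data)
 *	{
 *		set_freezable();	// kthreads are PF_NOFREEZE by default
 *		while (!kthread_freezable_should_stop(NULL)) {
 *			do_one_item(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */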

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kmalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->flags = 0;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * a new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or the new kernel
		 * thread) calls complete(), leave the cleanup of this
		 * structure to that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or the new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };

		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack; otherwise give
 * NUMA_NO_NODE.  When woken, the thread will run @threadfn() with @data
 * as its argument.  @threadfn() can either call do_exit() directly if it
 * is a standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
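
/*
 * Example: the typical create/wake/stop lifecycle.  This is an illustrative
 * sketch, not part of the original file; my_thread_fn(), my_dev and
 * process_one_request() are hypothetical caller-side names.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (!process_one_request(data))
 *				schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_dev, NUMA_NO_NODE,
 *				     "my_worker/%d", 0);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *	...
 *	ret = kthread_stop(tsk);	// returns my_thread_fn()'s result
 */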

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in the cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		/*
		 * Newly created kthread was parked when the CPU was offline.
		 * The binding was lost and we need to set it again.
		 */
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself, just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
		if (k != current) {
			wake_up_process(k);
			wait_for_completion(&kthread->parked);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
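
/*
 * Example: a thread function that cooperates with park/unpark.  This is an
 * illustrative sketch, not part of the original file; my_percpu_fn() and
 * do_percpu_work() are hypothetical caller-side names.
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				do_percpu_work(data);
 *		}
 *		return 0;
 *	}
 *
 * A controller can then call kthread_park(tsk) before taking the CPU
 * offline and kthread_unpark(tsk) once it is back online.
 */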

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * the task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the
 * queue is empty.
 *
 * Works must not hold any locks or leave preemption or interrupts disabled
 * when they finish. There is a defined safe point for freezing after one
 * work finishes and before the next one is started.
 *
 * Also the works must not be handled by more than one worker at the same
 * time, see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
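
/*
 * Example: the legacy pattern of running kthread_worker_fn() directly on a
 * caller-managed worker.  Illustrative sketch only, not part of the original
 * file; the kthread_create_worker*() helpers below are the preferred way.
 *
 *	struct kthread_worker worker;
 *	struct task_struct *task;
 *
 *	kthread_init_worker(&worker);
 *	task = kthread_run(kthread_worker_fn, &worker, "my_worker");
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	...
 *	kthread_flush_worker(&worker);
 *	kthread_stop(task);
 */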

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
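
/*
 * Example: creating a worker and queuing work to it.  Illustrative sketch,
 * not part of the original file; my_work_fn(), struct my_ctx and handle()
 * are hypothetical caller-side names.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		handle(ctx);
 *	}
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&ctx->work, my_work_fn);
 *	kthread_queue_work(worker, &ctx->work);
 *	...
 *	kthread_destroy_worker(worker);	// flushes and stops the worker
 */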

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to worker @worker for async execution.  @worker
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @__data: pointer to the data associated with the timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from an irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(unsigned long __data)
{
	struct kthread_delayed_work *dwork =
		(struct kthread_delayed_work *)__data;
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
		     timer->data != (unsigned long)dwork);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending, it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
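
/*
 * Example: arming a delayed work.  Illustrative sketch, not part of the
 * original file; my_timeout_fn and dwork are hypothetical caller-side names.
 *
 *	struct kthread_delayed_work dwork;
 *
 *	kthread_init_delayed_work(&dwork, my_timeout_fn);
 *	kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));
 *
 * Use kthread_mod_delayed_work() to re-arm the timer and
 * kthread_cancel_delayed_work_sync() to cancel reliably.
 */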

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes; see
 * worker->current_work for the work being processed.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if it exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily
		 * released to avoid a deadlock with the callback. In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_works. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
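
/*
 * Example: tearing down a delayed work that may re-arm itself.  Illustrative
 * sketch, not part of the original file; ctx is a hypothetical caller-side
 * structure embedding the kthread_delayed_work.
 *
 *	kthread_cancel_delayed_work_sync(&ctx->dwork);
 *
 * On return the timer is stopped and the work is neither queued nor
 * executing, so it is safe to free ctx.
 */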

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);