/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork) && try_get_task_stack(k))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
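
/*
 * Example (illustrative sketch, not part of this file): the usual shape of
 * a thread function that honours kthread_should_stop().  The names
 * my_thread_fn, my_dev and process_one_request() are hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		while (!kthread_should_stop()) {
 *			if (!process_one_request(dev))
 *				schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;	// passed back through kthread_stop()
 *	}
 */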

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
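
/*
 * Example (illustrative sketch, not part of this file): a freezable kthread
 * marks itself with set_freezable() and polls
 * kthread_freezable_should_stop() instead of kthread_should_stop().
 * my_freezable_fn is a hypothetical name.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen))
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 */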

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * @task to be a kthread of the current kernel.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}
	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };

		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
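
/*
 * Example (illustrative sketch, not part of this file): creating a named
 * thread and starting it.  The thread comes back stopped, so the caller
 * wakes it explicitly; kthread_run() wraps these two steps.  my_thread_fn
 * and my_data are hypothetical.
 *
 *	struct task_struct *task;
 *
 *	task = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				      "my_thread/%d", 0);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	wake_up_process(task);
 */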

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to a given CPU.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug need to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		/*
		 * Newly created kthread was parked when the CPU was offline.
		 * The binding was lost and we need to set it again.
		 */
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread) {
		__kthread_unpark(k, kthread);
		put_task_stack(k);
	}
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		put_task_stack(k);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_park);
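
/*
 * Example (illustrative sketch, not part of this file): parking pauses a
 * thread without tearing it down, e.g. across a CPU hotplug transition.
 * The thread function cooperates by calling kthread_parkme() when
 * kthread_should_park() is set.  my_task and reconfigure_something() are
 * hypothetical.
 *
 *	if (!kthread_park(my_task)) {
 *		reconfigure_something();	// runs with the thread quiesced
 *		kthread_unpark(my_task);
 *	}
 *
 * and in the thread function:
 *
 *	while (!kthread_should_stop()) {
 *		if (kthread_should_park())
 *			kthread_parkme();
 *		schedule_timeout_interruptible(HZ);
 *	}
 */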

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
		put_task_stack(k);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. There is defined a safe point for freezing when one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
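
/*
 * Example (illustrative sketch, not part of this file): the legacy pattern
 * runs kthread_worker_fn() on a thread the caller spawns directly;
 * kthread_create_worker() below wraps this.  my_worker is hypothetical;
 * DEFINE_KTHREAD_WORKER() and kthread_run() come from <linux/kthread.h>.
 *
 *	static DEFINE_KTHREAD_WORKER(my_worker);
 *
 *	struct task_struct *task;
 *
 *	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 */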

static struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0) {
		char name[TASK_COMM_LEN];

		/*
		 * kthread_create_worker_on_cpu() allows a generic @namefmt,
		 * unlike kthread_create_on_cpu() which formats the CPU
		 * number itself.  Format the caller's name up front and
		 * pass the finished string down.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		task = kthread_create_on_cpu(kthread_worker_fn, worker,
					     cpu, name);
	} else {
		task = __kthread_create_on_node(kthread_worker_fn, worker,
						-1, namefmt, args);
	}

	if (IS_ERR(task))
		goto fail_task;

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
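
/*
 * Example (illustrative sketch, not part of this file): a worker plus one
 * item of work.  my_work_fn and my_work are hypothetical; KTW_FREEZABLE
 * could be passed in @flags instead of 0 for a freezable worker.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		pr_info("work ran\n");
 *	}
 *
 *	struct kthread_worker *worker;
 *	struct kthread_work my_work;
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&my_work, my_work_fn);
 *	kthread_queue_work(worker, &my_work);
 */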

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @__data: pointer to the data associated with the timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(unsigned long __data)
{
	struct kthread_delayed_work *dwork =
		(struct kthread_delayed_work *)__data;
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
		     timer->data != (unsigned long)dwork);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer_stats_timer_set_start_info(&dwork->timer);
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
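
/*
 * Example (illustrative sketch, not part of this file): queueing delayed
 * work on an existing worker.  my_dwork, my_work_fn and worker are
 * hypothetical; the work function runs on the worker thread roughly
 * 2*HZ jiffies later.
 *
 *	static struct kthread_delayed_work my_dwork;
 *
 *	kthread_init_delayed_work(&my_dwork, my_work_fn);
 *	kthread_queue_delayed_work(worker, &my_dwork, 2 * HZ);
 */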

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See
 * worker->current_work for the one being processed.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running.  The lock must be temporarily
		 * released to avoid deadlocking with the callback, which
		 * takes it too; work->canceling blocks requeuing meanwhile.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
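
/*
 * Example (illustrative sketch, not part of this file): a debounce pattern.
 * Each event pushes the deadline back, so the work runs once, HZ jiffies
 * after the last event.  my_dwork and worker are hypothetical.
 *
 *	void on_event(void)
 *	{
 *		kthread_mod_delayed_work(worker, &my_dwork, HZ);
 *	}
 */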

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);