/*
 * Kernel thread helper functions.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the need for a new member in
	 * task_struct; it is not otherwise used by kernel threads and is
	 * not copied by copy_process().
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
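
/*
 * Illustrative sketch (not part of this file): the canonical main loop
 * of a kthread that cooperates with kthread_stop().  my_thread_fn() and
 * do_one_item() are hypothetical names used only for this example.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_one_item(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;	// passed back through kthread_stop()
 *	}
 */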

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
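
/*
 * Illustrative sketch (not part of this file): a freezable kthread loop.
 * set_freezable() clears the PF_NOFREEZE that kthreads inherit from
 * kthreadd; kthread_freezable_should_stop() then enters the refrigerator
 * safely.  my_freezable_fn() and do_one_item() are hypothetical names.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen))
 *			do_one_item(data);
 *		return 0;
 *	}
 */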

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * @task to be a kthread and its data to be valid, hence the speculative
 * nature.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		set_current_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
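
/*
 * Illustrative sketch (not part of this file): how a thread function
 * typically weaves kthread_parkme() into its loop so that hotplug-style
 * code can park it (names are hypothetical):
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				do_one_item(data);
 *		}
 *		return 0;
 *	}
 */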

/* Signal completion of the park handshake to waiters in kthread_park(). */
void kthread_park_complete(struct task_struct *k)
{
	complete_all(&to_kthread(k)->parked);
}

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						void *data, int node,
						const char namefmt[],
						va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };

		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
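
/*
 * Illustrative sketch (not part of this file): creating a named thread
 * with NUMA_NO_NODE and starting it explicitly.  Callers usually reach
 * this through the kthread_create()/kthread_run() wrappers declared in
 * <linux/kthread.h>.  my_thread_fn, my_data and id are hypothetical.
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				   "mydrv/%d", id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);	// new thread waits in TASK_UNINTERRUPTIBLE
 */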

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to @cpu, allocated on @cpu's NUMA node.  The thread is created
 * stopped; the caller is expected to wake or park it.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug need to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}
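
/*
 * Illustrative sketch (not part of this file): per-CPU creation.  The
 * thread comes back bound to @cpu and flagged KTHREAD_IS_PER_CPU; the
 * caller (e.g. smpboot-style code) then parks or wakes it.  Names are
 * hypothetical.
 *
 *	t = kthread_create_on_cpu(my_percpu_fn, my_data, cpu, "mydrv/%u");
 *	if (!IS_ERR(t))
 *		kthread_park(t);	// typically parked until the CPU is up
 */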

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Clears kthread_should_park() for @k and wakes it from TASK_PARKED.
 * If the thread is marked percpu then it is bound to its cpu again
 * before being woken.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The related per-CPU kthread_bind() was never called, so we need
	 * to bind it now.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	reinit_completion(&kthread->parked);
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * The caller is responsible for ensuring the validity of @k.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		wait_for_completion(&kthread->parked);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
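
/*
 * Illustrative sketch (not part of this file): the park/unpark cycle as
 * used by CPU-hotplug style code.  While parked, the thread sits in
 * TASK_PARKED inside __kthread_parkme() and consumes no CPU.
 *
 *	kthread_park(t);	// returns once t is parked
 *	...			// e.g. take the CPU down and up again
 *	kthread_unpark(t);	// rebinds a per-CPU thread and resumes it
 */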

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
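
/*
 * Illustrative sketch (not part of this file): tearing a thread down.
 * kthread_stop() also works on a thread that was created but never
 * woken; it then returns -EINTR without running threadfn().
 *
 *	int ret = kthread_stop(t);	// return value of my_thread_fn()
 */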

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker.  It processes
 * work_list until it is stopped with kthread_stop().  It sleeps when the
 * queue is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or
 * interrupts when they finish.  There is defined a safe point for freezing
 * when one work finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same
 * time, see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
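
/*
 * Illustrative sketch (not part of this file): creating a worker and
 * queueing work on it.  my_work_fn, struct my_ctx and process() are
 * hypothetical names used only for this example.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		process(ctx);
 *	}
 *
 *	worker = kthread_create_worker(0, "mydrv-worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&ctx->work, my_work_fn);
 *	kthread_queue_work(worker, &ctx->work);
 */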

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution.  Returns %true if @work
 * was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it reached the end of its life, anyway.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay.  If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending.  It means that
 * either the timer was running or the work was queued.  It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
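
/*
 * Illustrative sketch (not part of this file): arming a delayed work.
 * The timer must have been set up with kthread_delayed_work_timer_fn(),
 * which kthread_init_delayed_work() does for you.  ctx and my_work_fn
 * are hypothetical names.
 *
 *	kthread_init_delayed_work(&ctx->dwork, my_work_fn);
 *	kthread_queue_delayed_work(worker, &ctx->dwork, 2 * HZ);
 */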

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue.  Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes.  See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running.  The lock must be temporarily
		 * released to avoid a deadlock with the callback.  In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list.  It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work().  Otherwise,
 * modify @dwork's timer so that it expires after @delay.  If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call.  We let the other command
 * win and return %false here.  The caller is supposed to synchronize these
 * operations a reasonable way.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
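
/*
 * Illustrative sketch (not part of this file): pushing a pending timeout
 * further out, e.g. on activity.  Unlike kthread_queue_delayed_work(),
 * this replaces an already-armed timer instead of leaving it alone.
 *
 *	kthread_mod_delayed_work(worker, &ctx->dwork, 2 * HZ);
 */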

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself.  On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's.  Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
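
/*
 * Illustrative sketch (not part of this file): synchronous cancellation
 * before freeing the structure that embeds the work item (ctx is a
 * hypothetical name):
 *
 *	kthread_cancel_delayed_work_sync(&ctx->dwork);
 *	kfree(ctx);	// safe: work is neither pending nor running
 */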

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step
 * state machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
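
/*
 * Illustrative sketch (not part of this file): full worker teardown.
 * Pending works are drained by the internal flush before the worker
 * task is stopped and the structure freed.  ctx is hypothetical.
 *
 *	kthread_cancel_delayed_work_sync(&ctx->dwork);	// stop requeueing
 *	kthread_destroy_worker(worker);			// flush, stop, kfree
 */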

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread.  The thread is running jobs on behalf
 * of other threads.  In some cases, we expect the jobs to attach the cgroup
 * info of the original threads instead of that of the current thread.  This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif