/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion *done;

        struct list_head list;
};

struct kthread {
        unsigned long flags;
        unsigned int cpu;
        void *data;
        struct completion parked;
        struct completion exited;
#ifdef CONFIG_BLK_CGROUP
        struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
        /*
         * We abuse ->set_child_tid to avoid the new member and because it
         * can't be wrongly copied by copy_process(). We also rely on fact
         * that only the "current" task can set it.
         */
        current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
        WARN_ON(!(k->flags & PF_KTHREAD));
        return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
        struct kthread *kthread;

        /*
         * Can be NULL if this kthread was created by kernel_thread()
         * or if kmalloc() in kthread() failed.
         */
        kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
        WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
        kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the setup necessary
 * to park.  Then call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
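
/*
 * Usage sketch (not part of this file): a minimal main loop for a
 * freezable kthread.  my_do_work() is a hypothetical placeholder for
 * the thread's actual work.
 *
 *	static int my_freezable_thread(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(NULL)) {
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */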

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;

        probe_kernel_read(&data, &kthread->data, sizeof(data));
        return data;
}

static void __kthread_parkme(struct kthread *self)
{
        for (;;) {
                /*
                 * TASK_PARKED is a special state; we must serialize against
                 * possible pending wakeups to avoid store-store collisions
                 * on task->state.
                 *
                 * Such a collision might possibly result in the task state
                 * changing from TASK_PARKED and us failing the
                 * wait_for_completion() in kthread_park().
                 */
                set_special_state(TASK_PARKED);
                if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
                        break;

                complete(&self->parked);
                schedule();
        }
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct completion *done;
        struct kthread *self;
        int ret;

        self = kzalloc(sizeof(*self), GFP_KERNEL);
        set_kthread_struct(self);

        /* If user was SIGKILLed, I release the structure. */
        done = xchg(&create->done, NULL);
        if (!done) {
                kfree(create);
                do_exit(-EINTR);
        }

        if (!self) {
                create->result = ERR_PTR(-ENOMEM);
                complete(done);
                do_exit(-ENOMEM);
        }

        self->data = data;
        init_completion(&self->exited);
        init_completion(&self->parked);
        current->vfork_done = &self->exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(done);
        schedule();

        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
                cgroup_kthread_ready();
                __kthread_parkme(self);
                ret = threadfn(data);
        }
        do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                /* If user was SIGKILLed, I release the structure. */
                struct completion *done = xchg(&create->done, NULL);

                if (!done) {
                        kfree(create);
                        return;
                }
                create->result = ERR_PTR(pid);
                complete(done);
        }
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
                                             void *data, int node,
                                             const char namefmt[],
                                             va_list args)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
        struct kthread_create_info *create = kmalloc(sizeof(*create),
                                                     GFP_KERNEL);

        if (!create)
                return ERR_PTR(-ENOMEM);
        create->threadfn = threadfn;
        create->data = data;
        create->node = node;
        create->done = &done;

        spin_lock(&kthread_create_lock);
        list_add_tail(&create->list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        /*
         * Wait for completion in killable state, for I might be chosen by
         * the OOM killer while kthreadd is trying to allocate memory for
         * new kernel thread.
         */
        if (unlikely(wait_for_completion_killable(&done))) {
                /*
                 * If I was SIGKILLed before kthreadd (or new kernel thread)
                 * calls complete(), leave the cleanup of this structure to
                 * that thread.
                 */
                if (xchg(&create->done, NULL))
                        return ERR_PTR(-EINTR);
                /*
                 * kthreadd (or new kernel thread) will call complete()
                 * shortly.
                 */
                wait_for_completion(&done);
        }
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
                char name[TASK_COMM_LEN];

                /*
                 * task is already visible to other tasks, so updating
                 * COMM must be protected.
                 */
                vsnprintf(name, sizeof(name), namefmt, args);
                set_task_comm(task, name);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(task, cpu_all_mask);
        }
        kfree(create);
        return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        struct task_struct *task;
        va_list args;

        va_start(args, namefmt);
        task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
        va_end(args);

        return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
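
/*
 * Usage sketch (not part of this file): create a thread, start it, and
 * stop it later.  my_thread_fn and my_data are hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				   "my_thread");
 *	if (!IS_ERR(t))
 *		wake_up_process(t);
 *	...
 *	ret = kthread_stop(t);	// returns my_thread_fn()'s return value
 */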

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
        unsigned long flags;

        if (!wait_task_inactive(p, state)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        do_set_cpus_allowed(p, mask);
        p->flags |= PF_NO_SETAFFINITY;
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
        __kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
        __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug need to bind once again when unparking the thread. */
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        /*
         * Newly created kthread was parked when the CPU was offline.
         * The binding was lost and we need to set it again.
         */
        if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                __kthread_bind(k, kthread->cpu, TASK_PARKED);

        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
         * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
         */
        wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        if (WARN_ON(k->flags & PF_EXITING))
                return -ENOSYS;

        if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
                return -EBUSY;

        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        if (k != current) {
                wake_up_process(k);
                /*
                 * Wait for __kthread_parkme() to complete(), this means we
                 * _will_ have TASK_PARKED and are about to call schedule().
                 */
                wait_for_completion(&kthread->parked);
                /*
                 * Now wait for that schedule() to complete and the task to
                 * get scheduled out.
                 */
                WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
        }

        return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
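
/*
 * Usage sketch (not part of this file): park and unpark a thread, e.g.
 * around CPU hotplug.  The thread function must call kthread_parkme()
 * when kthread_should_park() is true; my_do_work() is hypothetical.
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 *	kthread_park(t);	// thread is now scheduled out
 *	kthread_unpark(t);	// rebinds (if per-cpu) and resumes
 */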

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);

        get_task_struct(k);
        kthread = to_kthread(k);
        set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
        kthread_unpark(k);
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
        ret = k->exit_code;
        put_task_struct(k);

        trace_sched_kthread_stop_ret(ret);
        return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_MEMORY]);

        current->flags |= PF_NOFREEZE;
        cgroup_init_kthreadd();

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        memset(worker, 0, sizeof(struct kthread_worker));
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. There is defined a safe point for freezing when one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        /*
         * FIXME: Update the check and remove the assignment when all kthread
         * worker users are created using kthread_create_worker*() functions.
         */
        WARN_ON(worker->task && worker->task != current);
        worker->task = current;

        if (worker->flags & KTW_FREEZABLE)
                set_freezable();

repeat:
        set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        cond_resched();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
                        const char namefmt[], va_list args)
{
        struct kthread_worker *worker;
        struct task_struct *task;
        int node = -1;

        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker)
                return ERR_PTR(-ENOMEM);

        kthread_init_worker(worker);

        if (cpu >= 0)
                node = cpu_to_node(cpu);

        task = __kthread_create_on_node(kthread_worker_fn, worker,
                                        node, namefmt, args);
        if (IS_ERR(task))
                goto fail_task;

        if (cpu >= 0)
                kthread_bind(task, cpu);

        worker->flags = flags;
        worker->task = task;
        wake_up_process(task);
        return worker;

fail_task:
        kfree(worker);
        return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(-1, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
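
/*
 * Usage sketch (not part of this file): run a work item on a dedicated
 * worker.  my_work_fn and my_work are hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		// runs in the worker's kthread context
 *	}
 *
 *	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *	struct kthread_worker *w;
 *
 *	w = kthread_create_worker(0, "my_worker");
 *	if (!IS_ERR(w)) {
 *		kthread_queue_work(w, &my_work);
 *		kthread_flush_work(&my_work);
 *		kthread_destroy_worker(w);
 *	}
 */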

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
                             const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(cpu, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
                                   struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);

        return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
                                             struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);
        WARN_ON_ONCE(!list_empty(&work->node));
        /* Do not use a work with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        kthread_insert_work_sanity_check(worker, work);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (!worker->current_work && likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
        struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
        struct kthread_work *work = &dwork->work;
        struct kthread_worker *worker = work->worker;

        /*
         * This might happen when a pending work is reinitialized.
         * It means that it is used a wrong way.
         */
        if (WARN_ON_ONCE(!worker))
                return;

        spin_lock(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        /* Move the work from worker->delayed_work_list. */
        WARN_ON_ONCE(list_empty(&work->node));
        list_del_init(&work->node);
        kthread_insert_work(worker, work, &worker->work_list);

        spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
                                  struct kthread_delayed_work *dwork,
                                  unsigned long delay)
{
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;

        WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

        /*
         * If @delay is 0, queue @dwork->work immediately.  This is for
         * both optimization and correctness.  The earliest @timer can
         * expire is on the closest next tick and delayed_work users depend
         * on that there's no such delay when @delay is 0.
         */
        if (!delay) {
                kthread_insert_work(worker, work, &worker->work_list);
                return;
        }

        /* Be paranoid and try to detect possible races already now. */
        kthread_insert_work_sanity_check(worker, work);

        list_add(&work->node, &worker->delayed_work_list);
        work->worker = worker;
        timer->expires = jiffies + delay;
        add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
                                struct kthread_delayed_work *dwork,
                                unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        bool ret = false;

        spin_lock_irqsave(&worker->lock, flags);

        if (!queuing_blocked(worker, work)) {
                __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }

        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
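
/*
 * Usage sketch (not part of this file): queue a delayed work, push its
 * deadline back, then cancel it.  my_dwork_fn and my_dwork are
 * hypothetical; w is a worker created as in the kthread_create_worker()
 * sketch above.
 *
 *	static void my_dwork_fn(struct kthread_work *work)
 *	{
 *		// runs in the worker's kthread after the delay elapses
 *	}
 *
 *	static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	kthread_queue_delayed_work(w, &my_dwork, msecs_to_jiffies(1000));
 *	kthread_mod_delayed_work(w, &my_dwork, msecs_to_jiffies(5000));
 *	kthread_cancel_delayed_work_sync(&my_dwork);
 */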

struct kthread_flush_work {
        struct kthread_work	work;
        struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

        worker = work->worker;
        if (!worker)
                return;

        spin_lock_irq(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        if (!list_empty(&work->node))
                kthread_insert_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                kthread_insert_work(worker, &fwork.work,
                                    worker->work_list.next);
        else
                noop = true;

        spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker list and, for delayed
 * work, shuts down the timer so that the work cannot get queued by it.
 *
 * Called with worker->lock held; the lock is temporarily released while
 * waiting for the timer callback to finish, with queuing blocked by the
 * canceling counter in the meantime.
 *
 * Returns %true if @work was pending and got cancelled, %false otherwise.
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
                                  unsigned long *flags)
{
        /* Try to cancel the timer if exists. */
        if (is_dwork) {
                struct kthread_delayed_work *dwork =
                        container_of(work, struct kthread_delayed_work, work);
                struct kthread_worker *worker = work->worker;

                /*
                 * del_timer_sync() must be called to make sure that the timer
                 * callback is not running. The lock must be temporary released
                 * to avoid a deadlock with the callback. In the meantime,
                 * any queuing is blocked by setting the canceling counter.
                 */
                work->canceling++;
                spin_unlock_irqrestore(&worker->lock, *flags);
                del_timer_sync(&dwork->timer);
                spin_lock_irqsave(&worker->lock, *flags);
                work->canceling--;
        }

        /*
         * Try to remove the work from a worker list. It might either
         * be from worker->work_list or from worker->delayed_work_list.
         */
        if (!list_empty(&work->node)) {
                list_del_init(&work->node);
                return true;
        }

        return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
                              struct kthread_delayed_work *dwork,
                              unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        int ret = false;

        spin_lock_irqsave(&worker->lock, flags);

        /* Do not bother with canceling when never queued. */
        if (!work->worker)
                goto fast_queue;

        /* Work must not be used with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker != worker);

        /* Do not fight with another command that is canceling this work. */
        if (work->canceling)
                goto out;

        ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
        __kthread_queue_delayed_work(worker, dwork, delay);
out:
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
        struct kthread_worker *worker = work->worker;
        unsigned long flags;
        int ret = false;

        if (!worker)
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        ret = __kthread_cancel_work(work, is_dwork, &flags);

        if (worker->current_work != work)
                goto out_fast;

        /*
         * The work is in progress and we need to wait with the lock released.
         * In the meantime, block any queuing by setting the canceling counter.
         */
        work->canceling++;
        spin_unlock_irqrestore(&worker->lock, flags);
        kthread_flush_work(work);
        spin_lock_irqsave(&worker->lock, flags);
        work->canceling--;

out_fast:
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
        return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
        return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        kthread_queue_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
        struct task_struct *task;

        task = worker->task;
        if (WARN_ON(!task))
                return;

        kthread_flush_worker(worker);
        kthread_stop(task);
        WARN_ON(!list_empty(&worker->work_list));
        kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs attach cgroup info of
 * original threads instead of that of current thread. This function stores
 * original thread's cgroup info in current kthread context for later
 * retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
        struct kthread *kthread;

        if (!(current->flags & PF_KTHREAD))
                return;
        kthread = to_kthread(current);
        if (!kthread)
                return;

        if (kthread->blkcg_css) {
                css_put(kthread->blkcg_css);
                kthread->blkcg_css = NULL;
        }
        if (css) {
                css_get(css);
                kthread->blkcg_css = css;
        }
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
        struct kthread *kthread;

        if (current->flags & PF_KTHREAD) {
                kthread = to_kthread(current);
                if (kthread)
                        return kthread->blkcg_css;
        }
        return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif