// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
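
/*
 * Illustrative use from a thread function (a sketch only, not part of
 * this file; my_thread_fn, do_some_work and the data pointer are
 * hypothetical names):
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_some_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * The value returned here is passed through to kthread_stop().
 */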

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions
		 * on task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
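
/*
 * Illustrative sketch of a park-aware thread function (not part of this
 * file; my_percpu_thread_fn and do_some_work are hypothetical names):
 *
 *	static int my_percpu_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				do_some_work(data);
 *		}
 *		return 0;
 *	}
 *
 * A kthread_park() from another task then makes this loop block in
 * kthread_parkme() until kthread_unpark() is called.
 */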

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
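
/*
 * Illustrative sketch of creating and starting a thread (not part of
 * this file; my_thread_fn and my_data are hypothetical names):
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create(my_thread_fn, my_data, "my-thread-%d", 1);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);
 *
 * kthread_run() from <linux/kthread.h> combines the two steps.
 */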

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug need to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
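
/*
 * Illustrative park/unpark sequence from a controlling task (not part
 * of this file; "t" is a thread whose function calls kthread_parkme()
 * as sketched above, and reconfigure_something() is a hypothetical
 * placeholder). While parked, t sits off the CPU in TASK_PARKED:
 *
 *	if (!kthread_park(t)) {
 *		reconfigure_something();
 *		kthread_unpark(t);
 *	}
 */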

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. There is defined a safe point for freezing when one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
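
/*
 * Illustrative worker usage (a sketch, not part of this file; my_work_fn
 * and the variable names are hypothetical):
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		pr_info("running\n");
 *	}
 *
 *	DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *	struct kthread_worker *w;
 *
 *	w = kthread_create_worker(0, "my-worker");
 *	if (!IS_ERR(w)) {
 *		kthread_queue_work(w, &my_work);
 *		kthread_flush_work(&my_work);
 *		kthread_destroy_worker(w);
 *	}
 */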

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
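
/*
 * Illustrative delayed-work usage (a sketch, not part of this file;
 * my_dwork, my_work_fn and "w" are hypothetical). Queue the work to run
 * on worker "w" roughly one second from now, then push it back another
 * second before the timer fires:
 *
 *	DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_work_fn);
 *
 *	kthread_queue_delayed_work(w, &my_dwork, HZ);
 *	kthread_mod_delayed_work(w, &my_dwork, 2 * HZ);
 */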

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);
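
/*
 * Note on flushing semantics: kthread_flush_work() inserts a marker
 * work right behind the flushed work (or behind the currently running
 * work) and waits for the marker's completion. It therefore only waits
 * for the instance that was pending or running at call time, not for
 * instances re-queued afterwards.
 */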

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily
		 * released to avoid a deadlock with the callback. In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		raw_spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		raw_spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn() for
 * details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs attach cgroup info of
 * original threads instead of that of current thread. This function stores
 * original thread's cgroup info in current kthread context for later
 * retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif