// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int (*threadfn)(void *);
	void *data;
	mm_segment_t oldfs;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to carry the kthread struct pointer;
	 * the field is unused for kernel threads and is never exposed to
	 * userspace while PF_KTHREAD is set (see to_kthread()).
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
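
/*
 * Usage sketch (illustrative only, not compiled; example_threadfn is a
 * hypothetical name): the canonical thread-function loop that a
 * kthread_stop() caller expects.  The thread polls kthread_should_stop()
 * and returns when it becomes true; the return value is what
 * kthread_stop() hands back.
 */
#if 0
static int example_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* Do one unit of work, then sleep until woken. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	}
	return 0;	/* passed through to kthread_stop() */
}
#endif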

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
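
/*
 * Usage sketch (illustrative only, not compiled; the example name is
 * hypothetical): a freezable kthread polls kthread_freezable_should_stop()
 * instead of kthread_should_stop(), so it parks in the refrigerator across
 * system suspend without deadlocking against kthread_stop().
 */
#if 0
static int example_freezable_threadfn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		/* Redo any work invalidated by a freeze/thaw cycle. */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif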

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	if (task->flags & PF_KTHREAD)
		return to_kthread(task)->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * @task itself to be safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions
		 * on task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack. */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->threadfn = threadfn;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup. */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* Called from the fork path to get node information for the new task. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif

	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task,
				     housekeeping_cpumask(HK_FLAG_KTHREAD));
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy
 * and is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give
 * NUMA_NO_NODE.  When woken, the thread will run @threadfn() with @data
 * as its argument.  @threadfn() can either call do_exit() directly if it
 * is a standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means kthread_stop()
 * has been called).  The return value should be zero or a negative error
 * number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
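
/*
 * Usage sketch (illustrative only, not compiled; example_threadfn and
 * example_dev are hypothetical): create a named thread with no NUMA
 * preference, then start it explicitly.  This mirrors what the
 * kthread_run() convenience macro does in one step.
 */
#if 0
static struct task_struct *example_start_thread(void *example_dev)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_threadfn, example_dev,
				     NUMA_NO_NODE, "example/%d", 0);
	if (!IS_ERR(tsk))
		wake_up_process(tsk);	/* thread is created stopped */
	return tsk;
}
#endif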

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread.  Format is restricted
 *	     to "name.*%u".  Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to a given CPU.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
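
/*
 * Usage sketch (illustrative only, not compiled; example_task is
 * hypothetical): stopping a thread started with the earlier creation
 * sketch.  kthread_stop() wakes the thread, waits for it to exit, and
 * returns whatever the thread function returned.
 */
#if 0
static void example_stop_thread(struct task_struct *example_task)
{
	int ret;

	ret = kthread_stop(example_task);
	pr_debug("example thread exited with %d\n", ret);
}
#endif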

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}
642
643void __kthread_init_worker(struct kthread_worker *worker,
644 const char *name,
645 struct lock_class_key *key)
646{
647 memset(worker, 0, sizeof(struct kthread_worker));
648 raw_spin_lock_init(&worker->lock);
649 lockdep_set_class_and_name(&worker->lock, key, name);
650 INIT_LIST_HEAD(&worker->work_list);
651 INIT_LIST_HEAD(&worker->delayed_work_list);
652}
653EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker.  It processes
 * work_list until it is stopped with kthread_stop().  It sleeps when the
 * queue is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or
 * interrupts when they finish.  There is a defined safe point for freezing
 * when one work finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same
 * time, see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
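
/*
 * Usage sketch (illustrative only, not compiled; example_work_fn is
 * hypothetical): create a worker, feed it one work item, wait for it,
 * then tear the worker down.
 */
#if 0
static void example_use_worker(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(worker))
		return;

	kthread_init_work(&work, example_work_fn);
	kthread_queue_work(worker, &work);
	kthread_flush_work(&work);	/* wait for the work to finish */
	kthread_destroy_worker(worker);
}
#endif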

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
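
/*
 * Usage sketch (illustrative only, not compiled; struct example_ctx and
 * example_work_fn are hypothetical): a work function recovers its context
 * with container_of(), the same pattern used for workqueue works.
 */
#if 0
struct example_ctx {
	struct kthread_work work;
	int payload;
};

static void example_work_fn(struct kthread_work *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	pr_debug("processing payload %d\n", ctx->payload);
}
#endif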

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay.  If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending.  It means that
 * either the timer was running or the work was queued.  It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
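
/*
 * Usage sketch (illustrative only, not compiled; example_dwork and
 * example_work_fn are hypothetical): arm a delayed work so it runs on a
 * worker roughly one second from now.
 */
#if 0
static struct kthread_delayed_work example_dwork;

static void example_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_work_fn);
	kthread_queue_delayed_work(worker, &example_dwork,
				   msecs_to_jiffies(1000));
}
#endif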
970
971struct kthread_flush_work {
972 struct kthread_work work;
973 struct completion done;
974};
975
976static void kthread_flush_work_fn(struct kthread_work *work)
977{
978 struct kthread_flush_work *fwork =
979 container_of(work, struct kthread_flush_work, work);
980 complete(&fwork->done);
981}
982
983
984
985
986
987
988
989void kthread_flush_work(struct kthread_work *work)
990{
991 struct kthread_flush_work fwork = {
992 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
993 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
994 };
995 struct kthread_worker *worker;
996 bool noop = false;
997
998 worker = work->worker;
999 if (!worker)
1000 return;
1001
1002 raw_spin_lock_irq(&worker->lock);
1003
1004 WARN_ON_ONCE(work->worker != worker);
1005
1006 if (!list_empty(&work->node))
1007 kthread_insert_work(worker, &fwork.work, work->node.next);
1008 else if (worker->current_work == work)
1009 kthread_insert_work(worker, &fwork.work,
1010 worker->work_list.next);
1011 else
1012 noop = true;
1013
1014 raw_spin_unlock_irq(&worker->lock);
1015
1016 if (!noop)
1017 wait_for_completion(&fwork.done);
1018}
1019EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue.  Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes.  See the
 * current_work processing by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if it exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running.  The lock must be temporarily
		 * released to avoid a deadlock with the callback.  In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		raw_spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		raw_spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list.  It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work().  Otherwise,
 * modify @dwork's timer so that it expires after @delay.  If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call.  We let the other command
 * win and return %false here.  The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself.  On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's.  Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios; there are no multi-step
 * state machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	if (active_mm != mm)
		mmdrop(active_mm);

	to_kthread(tsk)->oldfs = force_uaccess_begin();
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	force_uaccess_end(to_kthread(tsk)->oldfs);

	task_lock(tsk);
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
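
/*
 * Usage sketch (illustrative only, not compiled; example_mm is
 * hypothetical): a kthread temporarily borrows a user address space,
 * performs uaccess against it, then releases it.  The two calls must be
 * strictly paired, and the caller must hold a reference on example_mm.
 */
#if 0
static int example_with_mm(struct mm_struct *example_mm)
{
	int ret = 0;

	kthread_use_mm(example_mm);
	/* copy_to_user()/copy_from_user() against example_mm work here. */
	kthread_unuse_mm(example_mm);
	return ret;
}
#endif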

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread.  The thread is running jobs on behalf
 * of other threads.  In some cases, we expect the jobs to attach the cgroup
 * info of the original threads instead of that of the current thread.  This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif