// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *   Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int (*threadfn)(void *);
	void *data;
	mm_segment_t oldfs;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction; when:
 *
 *   (p->flags & PF_KTHREAD) && p->set_child_tid
 *
 * the task is both a kthread and struct kthread is persistent. However
 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
 * call_usermodehelper()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = (__force void *)p->set_child_tid;

	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void set_kthread_struct(struct task_struct *p)
{
	struct kthread *kthread;

	if (__to_kthread(p))
		return;

	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on fact
	 * that the caller can't exec, so PF_KTHREAD can't be removed.
	 */
	p->set_child_tid = (__force void __user *)kthread;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
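
/*
 * Illustrative usage (a sketch, not part of the original file; the
 * thread function and do_background_step() are hypothetical): a
 * freezable kthread main loop built on kthread_freezable_should_stop().
 *
 *	static int example_freezable_fn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(NULL)) {
 *			do_background_step(data);		// hypothetical work
 *			schedule_timeout_interruptible(HZ);	// sleep ~1s
 *		}
 *		return 0;
 *	}
 *
 * kthread_freezable_should_stop() enters the refrigerator itself when a
 * freeze is pending, so the loop needs no explicit try_to_freeze() call.
 */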

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);

	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * @task to be a kthread, its data to be valid, hence the validity of @task.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_for_completion() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	set_kthread_struct(current);
	self = to_kthread(current);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->threadfn = threadfn;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from kernel_clone() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task,
				     housekeeping_cpumask(HK_FLAG_KTHREAD));
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
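
/*
 * Illustrative usage (a sketch, not part of the original file;
 * example_fn, ctx and id are hypothetical): the task returned by
 * kthread_create_on_node() is stopped, so start it explicitly and
 * stop it later with kthread_stop().
 *
 *	static int example_fn(void *data)
 *	{
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);	// do work, then sleep
 *		return 0;
 *	}
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_node(example_fn, ctx, NUMA_NO_NODE,
 *				   "example/%d", id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);
 *	...
 *	ret = kthread_stop(t);	// returns example_fn()'s return value
 *
 * kthread_run() in <linux/kthread.h> wraps the create + wake pair.
 */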

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug need to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);

	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);

	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then its
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
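
/*
 * Illustrative park-aware loop (a sketch, not part of the original
 * file; example_percpu_fn and do_percpu_step are hypothetical).  The
 * thread must reach kthread_parkme() itself; that completion is what
 * kthread_park() waits for.
 *
 *	static int example_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			do_percpu_step(data);	// hypothetical work
 *		}
 *		return 0;
 *	}
 *
 * A controller parks the thread with kthread_park(t), e.g. before its
 * CPU goes offline, and resumes it later with kthread_unpark(t).
 */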

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. There is defined a safe point for freezing when one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
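
/*
 * Illustrative worker usage (a sketch, not part of the original file;
 * the names are invented for the example):
 *
 *	static void example_work_fn(struct kthread_work *work)
 *	{
 *		// runs in the worker task's context, one work at a time
 *	}
 *
 *	static DEFINE_KTHREAD_WORK(example_work, example_work_fn);
 *	struct kthread_worker *w;
 *
 *	w = kthread_create_worker(0, "example_worker");
 *	if (IS_ERR(w))
 *		return PTR_ERR(w);
 *	kthread_queue_work(w, &example_work);
 */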

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pinned workers. They might migrate them to another CPU
 * or unbind, or even just let them run with the wrong binding.
 * Note that the binding is lost when the CPU goes offline. The worker
 * might run on the wrong CPU after it comes back online. Similar problems
 * exist for workers created for a CPU that was not online yet.
 *
 * Good practice is to implement two CPU hotplug callbacks and to
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the callback is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_FUNCTION_MISMATCH(timer->function,
				  kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
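
/*
 * Illustrative delayed-work usage (a sketch, not part of the original
 * file; example_dwork, example_work_fn and w are carried over from the
 * invented example above):
 *
 *	static DEFINE_KTHREAD_DELAYED_WORK(example_dwork, example_work_fn);
 *
 *	kthread_queue_delayed_work(w, &example_dwork, 2 * HZ);	// in ~2s
 *	kthread_mod_delayed_work(w, &example_dwork, 5 * HZ);	// push out
 *	kthread_cancel_delayed_work_sync(&example_dwork);	// cancel + wait
 */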

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporary
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporary released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work proceeded by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporary cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers. The timer
	 * might be removed but canceling could not finish when this function
	 * was called. Meanwhile, the parallel caller might have set or
	 * removed the timer, but the work is being canceled either way;
	 * report it as canceled in this case.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough as the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
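
/*
 * Illustrative teardown (a sketch, not part of the original file,
 * continuing the invented example above): cancel works you still hold
 * references to, then destroy the worker, which flushes the queue,
 * stops the task and frees the structure.
 *
 *	kthread_cancel_work_sync(&example_work);
 *	kthread_destroy_worker(w);
 */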

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after storing to tsk->mm, before accessing
	 * user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop(), or explicitly with smp_mb().
	 */
	if (active_mm != mm)
		mmdrop(active_mm);
	else
		smp_mb();

	to_kthread(tsk)->oldfs = force_uaccess_begin();
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	force_uaccess_end(to_kthread(tsk)->oldfs);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after accessing user-space memory, before
	 * clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
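
/*
 * Illustrative bracket (a sketch, not part of the original file): a
 * kthread temporarily adopting a user address space, e.g. to perform
 * user-memory accesses on another task's behalf:
 *
 *	kthread_use_mm(mm);
 *	ret = copy_to_user(uaddr, buf, len);	// user access is now legal
 *	kthread_unuse_mm(mm);
 *
 * The mm must be kept alive by the caller (e.g. via mmget()) across
 * the bracket.
 */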

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs attach cgroup info of
 * original threads instead of that of current thread. This function stores
 * original thread's cgroup info in current kthread context for later
 * retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif