/*
 * Kernel thread helper functions.
 *
 * Creation is done via kthreadd, so that new kernel threads get a
 * clean environment even if the request comes from a user context
 * (think modprobe, CPU hotplug and so on).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int (*threadfn)(void *);
	void *data;
	mm_segment_t oldfs;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
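
/*
 * Example (illustrative sketch, not part of the original file): a
 * typical thread function polls kthread_should_stop() in its main
 * loop and returns once it reads true; the return value is then
 * reported by kthread_stop().  my_thread_fn and my_do_work() are
 * hypothetical names, and the periodic timeout merely bounds how long
 * the thread takes to notice a stop request.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */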

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a parked position.  kthread_unpark() "restarts" the thread
 * and calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, set to %true if the kthread was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from the
 * kthread_stop() / freezer deadlock and freezable kthreads should use
 * this function instead of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
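
/*
 * Example (illustrative sketch, not part of the original file): a
 * freezable kthread marks itself with set_freezable() and then uses
 * kthread_freezable_should_stop() as its loop condition, so it enters
 * the refrigerator during suspend while still reacting to
 * kthread_stop().  my_freezable_fn and my_do_work() are hypothetical.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(NULL)) {
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */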

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	if (task->flags & PF_KTHREAD)
		return to_kthread(task)->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself be safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions
		 * on task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
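
/*
 * Example (illustrative sketch, not part of the original file): a
 * thread function that supports parking checks kthread_should_park()
 * in its loop and calls kthread_parkme(), which blocks until the
 * thread is unparked or stopped.  my_parkable_fn and my_do_work() are
 * hypothetical.
 *
 *	static int my_parkable_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				my_do_work(data);
 *			cond_resched();
 *		}
 *		return 0;
 *	}
 */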

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->threadfn = threadfn;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* Called from the fork path to get node information for the task to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task,
				     housekeeping_cpumask(HK_FLAG_KTHREAD));
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread on a given NUMA node.
 * @threadfn: the function to run in the thread.
 * @data: data pointer passed to @threadfn.
 * @node: NUMA node on which to allocate the task and thread structures,
 *	  or NUMA_NO_NODE.
 * @namefmt: printf-style name for the thread.
 *
 * This helper creates and names a kernel thread.  The thread is created
 * in a stopped state: use wake_up_process() to start it (or kthread_run()
 * to create and start it in one go).  The new thread has SCHED_NORMAL
 * policy and is allowed to run on all housekeeping CPUs.
 *
 * When woken, the thread runs @threadfn() with @data as its argument.
 * @threadfn() should return when kthread_should_stop() is true (which
 * means kthread_stop() has been called); the return value is then passed
 * through to kthread_stop().
 *
 * Returns a task_struct or an ERR_PTR() on failure.
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
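
/*
 * Example (illustrative sketch, not part of the original file):
 * creating, starting and later stopping a kthread.  my_thread_fn,
 * my_data and the "my-worker" name are hypothetical; kthread_run()
 * from <linux/kthread.h> wraps the create + wake_up_process() pair,
 * and kthread_stop() returns whatever my_thread_fn() returned.
 *
 *	struct task_struct *tsk;
 *	int ret;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_data,
 *				     NUMA_NO_NODE, "my-worker");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 */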

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
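
/*
 * Example (illustrative sketch, not part of the original file):
 * pinning a freshly created (still stopped) kthread to one CPU before
 * starting it.  my_thread_fn, my_data and the "my-thread/%u" name are
 * hypothetical; kthread_create() is the NUMA_NO_NODE wrapper from
 * <linux/kthread.h>.
 *
 *	tsk = kthread_create(my_thread_fn, my_data, "my-thread/%u", cpu);
 *	if (!IS_ERR(tsk)) {
 *		kthread_bind(tsk, cpu);
 *		wake_up_process(tsk);
 *	}
 */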

/**
 * kthread_create_on_cpu - create a CPU-bound kthread
 * @threadfn: the function to run in the thread.
 * @data: data pointer passed to @threadfn.
 * @cpu: the CPU on which the thread will be bound.
 * @namefmt: printf-style name for the thread; it should contain a "%u"
 *	     conversion, which is filled in with the CPU number.
 *
 * This helper creates a kthread on the NUMA node of @cpu, binds it to
 * @cpu and names it.  Like kthread_create_on_node(), the thread is
 * returned in a stopped state.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Clears the park request for @k and wakes it up.  If the thread is
 * marked per-CPU then it is bound to its CPU again before being woken.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to park.
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
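
/*
 * Example (illustrative sketch, not part of the original file):
 * temporarily quiescing a cooperating thread around a critical
 * reconfiguration.  The thread function must call kthread_parkme()
 * when kthread_should_park() is true (see the parking example above);
 * my_reconfigure() is a hypothetical helper that relies on the thread
 * being off-CPU while parked.
 *
 *	if (!kthread_park(tsk)) {
 *		my_reconfigure();
 *		kthread_unpark(tsk);
 *	}
 */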

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the
 * queue is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or
 * interrupts when they finish. There is a defined safe point for freezing
 * when one work finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same
 * time, see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point.  The task
		 * could already free it.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
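
/*
 * Example (illustrative sketch, not part of the original file):
 * creating a dedicated worker thread and tearing it down again.  The
 * "my-worker" name is hypothetical; work items are queued with
 * kthread_queue_work(), see the example further below.
 *
 *	struct kthread_worker *worker;
 *
 *	worker = kthread_create_worker(0, "my-worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	...
 *	kthread_destroy_worker(worker);
 */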

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name,
 * for example kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.  It is up to the API user how to
 * handle CPU hotplug: pending work items, blocking new submissions, and
 * restoring functionality when the CPU goes off and on.  There are a few
 * catches:
 *
 *    - CPU affinity gets lost when the worker is scheduled on an
 *      offline CPU.
 *
 *    - The worker might not exist when the CPU was off at the time the
 *      user created the workers.
 *
 * A good practice is to implement two CPU hotplug callbacks and
 * destroy/create the worker when the CPU goes down/up.
 *
 * Return:
 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker,
 * for example when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
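
/*
 * Example (illustrative sketch, not part of the original file):
 * declaring a work item, queueing it on a worker and waiting for it
 * to finish.  my_work_fn and my_work are hypothetical names.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		pr_info("running in the worker thread\n");
 *	}
 *
 *	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *
 *	kthread_queue_work(worker, &my_work);
 *	kthread_flush_work(&my_work);
 */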

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from an irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
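
/*
 * Example (illustrative sketch, not part of the original file):
 * scheduling a work item to run on the worker roughly one second from
 * now, pushing it back later, and cancelling it on teardown.
 * my_dwork and my_work_fn are hypothetical.
 *
 *	static struct kthread_delayed_work my_dwork;
 *
 *	kthread_init_delayed_work(&my_dwork, my_work_fn);
 *	kthread_queue_delayed_work(worker, &my_dwork, HZ);
 *	...
 *	kthread_mod_delayed_work(worker, &my_dwork, 5 * HZ);
 *	...
 *	kthread_cancel_delayed_work_sync(&my_dwork);
 */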

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue.  Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes.  See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending.
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if it exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running.  The lock must be temporarily
		 * released to avoid a deadlock with the callback.  In the
		 * meantime, any queuing is blocked by setting the canceling
		 * counter.
		 */
		work->canceling++;
		raw_spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		raw_spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list.  It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * A special case is when the work is being canceled in parallel, either by
 * kthread_cancel_delayed_work_sync() or by another kthread_mod_delayed_work()
 * call.  We let the other operation win and return %false here.  The caller
 * is supposed to synchronize these operations in a reasonable way.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself.  On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works; use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios; there are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
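
/*
 * Example (illustrative sketch, not part of the original file): a
 * typical shutdown sequence for a worker that owns a self-rearming
 * delayed work.  Cancel the work first so it cannot queue itself
 * again, then destroy the worker.  my_dwork and worker are the
 * hypothetical objects from the earlier examples.
 *
 *	kthread_cancel_delayed_work_sync(&my_dwork);
 *	kthread_destroy_worker(worker);
 */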

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after storing to tsk->mm, before accessing
	 * user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop(), or explicitly with smp_mb().
	 */
	if (active_mm != mm)
		mmdrop(active_mm);
	else
		smp_mb();

	to_kthread(tsk)->oldfs = force_uaccess_begin();
}
EXPORT_SYMBOL_GPL(kthread_use_mm);
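
/*
 * Example (illustrative sketch, not part of the original file): a
 * kthread temporarily adopting a user address space, e.g. to service
 * I/O on behalf of a user process.  The mm must have been obtained
 * and pinned by the caller (for instance via mmget());
 * my_access_user_buffers() is a hypothetical helper that may touch
 * user memory only between the two calls.
 *
 *	kthread_use_mm(mm);
 *	my_access_user_buffers(ctx);
 *	kthread_unuse_mm(mm);
 */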

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	force_uaccess_end(to_kthread(tsk)->oldfs);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after accessing user-space memory, before
	 * clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate a blkcg with the current kthread
 * @css: the cgroup info
 *
 * The current thread must be a kthread.  Such a thread runs jobs on behalf
 * of other threads, and in some cases those jobs should be charged to the
 * cgroup of the originating thread rather than to the kthread itself.
 * This function stores the original thread's cgroup info in the current
 * kthread's context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get the associated blkcg css of the current kthread
 *
 * The current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif