// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *   Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int (*threadfn)(void *);
	void *data;
	mm_segment_t oldfs;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to remember the struct kthread pointer:
	 * it is otherwise unused for kernel threads, cannot be wrongly
	 * copied by copy_process(), and the caller can't exec, so
	 * PF_KTHREAD can't be removed.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction; when:
 *
 *   (p->flags & PF_KTHREAD) && p->set_child_tid
 *
 * the task is both a kthread and struct kthread is persistent. However
 * when the task is any of the kthread bootstrap / fork states, then the
 * kthread is pseudo but must have struct kthread either way.
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = (__force void *)p->set_child_tid;
	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
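
/*
 * Illustrative sketch, not part of the original file: the canonical
 * kthread_should_stop() loop.  example_poll_fn and its "do work" step
 * are hypothetical placeholders.
 */
#if 0
static int example_poll_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work on @data, then nap for a second */
		schedule_timeout_interruptible(HZ);
	}
	/* this value is handed back to kthread_stop() */
	return 0;
}
#endif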

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
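
/*
 * Illustrative sketch, not part of the original file: a freezable kthread
 * main loop.  example_freezable_fn and its work step are hypothetical.
 */
#if 0
static int example_freezable_fn(void *data)
{
	bool was_frozen;

	/* opt in to freezing; kthreads are not freezable by default */
	set_freezable();

	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			continue;	/* re-read any cached state after a thaw */
		/* do one unit of work, then sleep */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif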

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions
		 * on task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
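
/*
 * Illustrative sketch, not part of the original file: a park-aware loop, as
 * used by per-CPU threads.  example_percpu_fn is a hypothetical name.
 */
#if 0
static int example_percpu_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* quiesce, then sleep in TASK_PARKED until unparked */
			kthread_parkme();
			continue;
		}
		/* do per-CPU work, then sleep */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif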

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->threadfn = threadfn;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from kernel_clone() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task,
				     housekeeping_cpumask(HK_FLAG_KTHREAD));
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
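
/*
 * Illustrative sketch, not part of the original file: creating a thread with
 * a NUMA-affine stack and then waking it.  example_fn, example_data and
 * example_spawn() are hypothetical.
 */
#if 0
static int example_spawn(int node, void *example_data)
{
	struct task_struct *t;

	t = kthread_create_on_node(example_fn, example_data, node,
				   "example/%d", node);
	if (IS_ERR(t))
		return PTR_ERR(t);

	/* the thread is created stopped; start it explicitly */
	wake_up_process(t);
	return 0;
}
#endif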

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
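
/*
 * Illustrative sketch, not part of the original file: binding a freshly
 * created (still stopped) thread to one CPU before starting it.  example_fn
 * and example_bind_one() are hypothetical.
 */
#if 0
static struct task_struct *example_bind_one(unsigned int cpu)
{
	struct task_struct *t;

	t = kthread_create(example_fn, NULL, "example/%u", cpu);
	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);	/* must happen while the task is stopped */
		wake_up_process(t);
	}
	return t;
}
#endif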

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to a given CPU.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug need to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);
	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);
	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then its
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
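
/*
 * Illustrative sketch, not part of the original file: quiescing a thread from
 * the controlling side (the pattern used around CPU hotplug).  example_task
 * and example_quiesce() are hypothetical.
 */
#if 0
static int example_quiesce(struct task_struct *example_task)
{
	int ret;

	/* the thread ends up sleeping in TASK_PARKED inside kthread_parkme() */
	ret = kthread_park(example_task);
	if (ret)
		return ret;

	/* ... reconfigure whatever the thread touches ... */

	/* let it resume; per-CPU threads get re-bound to their CPU here */
	kthread_unpark(example_task);
	return 0;
}
#endif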

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);
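
/*
 * Illustrative sketch, not part of the original file: the full
 * create/run/stop lifecycle.  example_poll_fn (see the sketch after
 * kthread_should_stop() above), example_task, example_init() and
 * example_exit() are hypothetical.
 */
#if 0
static struct task_struct *example_task;

static int example_init(void *example_data)
{
	/* kthread_run() = kthread_create() + wake_up_process() */
	example_task = kthread_run(example_poll_fn, example_data, "example");
	return PTR_ERR_OR_ZERO(example_task);
}

static void example_exit(void)
{
	/* wakes the thread, waits for it to exit, and returns its exit code */
	int ret = kthread_stop(example_task);

	pr_debug("example thread exited with %d\n", ret);
}
#endif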

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. There is defined a safe point for freezing when one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
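
/*
 * Illustrative sketch, not part of the original file: basic kthread_worker
 * usage.  example_work_fn and example_worker_demo() are hypothetical.
 */
#if 0
static void example_work_fn(struct kthread_work *work)
{
	/* runs in the worker's kthread context, one work item at a time */
}

static int example_worker_demo(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&work, example_work_fn);
	kthread_queue_work(worker, &work);

	/* wait for this one item; kthread_flush_worker() would drain them all */
	kthread_flush_work(&work);

	kthread_destroy_worker(worker);
	return 0;
}
#endif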

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * CPU hotplug:
 * The kthread worker API is simple and generic. It just provides a way
 * to create, use, and destroy workers.
 *
 * It is up to the API user how to handle CPU hotplug. They have to decide
 * how to handle pinned workers when the CPU goes down. Some suggestions:
 *
 * - CPU affinity gets lost when the worker is scheduled on an offline CPU.
 *
 * - The worker might not exist when the CPU was off when the user
 *   created the workers.
 *
 * - Good practice is to implement two CPU hotplug callbacks and
 *   destroy/create the worker when the CPU goes down/up.
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the caller was killed by a fatal signal.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the callback is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_FUNCTION_MISMATCH(timer->function,
				  kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
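
/*
 * Illustrative sketch, not part of the original file: deferring work and
 * later rescheduling or canceling it.  example_dwork, example_work_fn and
 * example_delayed_demo() are hypothetical.
 */
#if 0
static struct kthread_delayed_work example_dwork;

static void example_delayed_demo(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_work_fn);

	/* run example_work_fn() on the worker ~100ms from now */
	kthread_queue_delayed_work(worker, &example_dwork,
				   msecs_to_jiffies(100));

	/* push the deadline out (or re-queue it if it already expired) */
	kthread_mod_delayed_work(worker, &example_dwork,
				 msecs_to_jiffies(500));

	/* stop the timer and wait out any execution in progress */
	kthread_cancel_delayed_work_sync(&example_dwork);
}
#endif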

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporary
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporary released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporary cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling)
		goto out;
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	/*
	 * When a kthread starts operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after storing to tsk->mm, before accessing
	 * user-space memory. A full memory barrier for membarrier
	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
	 * mmdrop(), or explicitly with smp_mb().
	 */
	if (active_mm != mm)
		mmdrop(active_mm);
	else
		smp_mb();

	to_kthread(tsk)->oldfs = force_uaccess_begin();
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	force_uaccess_end(to_kthread(tsk)->oldfs);

	task_lock(tsk);
	/*
	 * When a kthread stops operating on an address space, the loop
	 * in membarrier_{private,global}_expedited() may not observe
	 * that tsk->mm, and not issue an IPI. Membarrier requires a
	 * memory barrier after accessing user-space memory, before
	 * clearing tsk->mm.
	 */
	smp_mb__after_spinlock();
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	membarrier_update_current_mm(NULL);
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
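
/*
 * Illustrative sketch, not part of the original file: a kthread temporarily
 * borrowing a userspace address space (the io_uring/vhost style pattern).
 * The mm pointer is assumed to be pinned by the caller, e.g. via mmget();
 * example_read_user() and uptr are hypothetical.
 */
#if 0
static int example_read_user(struct mm_struct *mm, int __user *uptr, int *val)
{
	int ret = 0;

	kthread_use_mm(mm);		/* current->mm becomes @mm */
	if (copy_from_user(val, uptr, sizeof(*val)))
		ret = -EFAULT;
	kthread_unuse_mm(mm);		/* drop back to lazy-TLB mode */

	return ret;
}
#endif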

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs attach cgroup info of
 * original threads instead of that of current thread. This function stores
 * original thread's cgroup info in current kthread context for later
 * retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif