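/*
 * Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */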
#include <linux/rh_kabi.h>
#include <uapi/linux/sched/types.h>
#include RH_KABI_HIDE_INCLUDE(<linux/mm.h>)
#include RH_KABI_HIDE_INCLUDE(<linux/mmu_context.h>)
#include <linux/sched.h>
#include RH_KABI_HIDE_INCLUDE(<linux/sched/mm.h>)
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int (*threadfn)(void *);
	void *data;
	mm_segment_t oldfs;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

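/*
 * Housekeeping flags and cpumask helper; these mirror the definitions
 * normally provided by <linux/sched/isolation.h>.
 */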
enum hk_flags {
	HK_FLAG_TIMER = 1,
	HK_FLAG_RCU = (1 << 1),
	HK_FLAG_MISC = (1 << 2),
	HK_FLAG_SCHED = (1 << 3),
	HK_FLAG_TICK = (1 << 4),
	HK_FLAG_DOMAIN = (1 << 5),
	HK_FLAG_WQ = (1 << 6),
	HK_FLAG_MANAGED_IRQ = (1 << 7),
	HK_FLAG_KTHREAD = (1 << 8),
};

#ifdef CONFIG_CPU_ISOLATION
extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
#else
static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
{
	return cpu_possible_mask;
}
#endif

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on fact
	 * that only kthreads can use ->set_child_tid.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

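/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */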
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

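/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the setup and call
 * kthread_parkme()
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */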
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

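/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */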
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

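/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */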
void *kthread_func(struct task_struct *task)
{
	if (task->flags & PF_KTHREAD)
		return to_kthread(task)->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

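/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */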
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

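/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */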
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_for_completion() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete_all(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->threadfn = threadfn;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

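/* Called at fork time to pick the NUMA node for a task being created. */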
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task,
				     housekeeping_cpumask(HK_FLAG_KTHREAD));
	}
	kfree(create);
	return task;
}

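/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */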
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

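/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */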
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

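/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to @cpu and marked as per-cpu. The thread is created stopped;
 * use wake_up_process() or kthread_unpark() to run it.
 */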
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug need to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

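/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then its
 * bound to the cpu again.
 */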
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	reinit_completion(&kthread->parked);
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

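/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */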
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

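/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */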
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

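/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. There is defined a safe point for freezing when one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */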
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

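/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */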
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

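/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */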
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

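/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */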
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

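/* insert @work before @pos in @worker */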
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

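/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */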
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

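/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */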
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

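/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */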
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

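/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */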
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

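/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work proceeded by the worker.
 *
 * Return: %true if @work was pending and successfully cancelled,
 *	%false if @work was not pending
 */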
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporary released
		 * to avoid a deadlock with the callback. In the meantime,
		 * any queuing is blocked by setting the canceling counter.
		 */
		work->canceling++;
		raw_spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		raw_spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

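/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */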
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

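/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */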
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

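/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */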
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

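/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */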
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

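/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */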
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

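/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */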
void kthread_use_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	use_mm(mm);
	to_kthread(tsk)->oldfs = get_fs();
	set_fs(USER_DS);
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

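/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */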
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	set_fs(to_kthread(tsk)->oldfs);

	unuse_mm(mm);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);

#ifdef CONFIG_BLK_CGROUP
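/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs attach cgroup info of
 * original threads instead of that of current thread. This function stores
 * original thread's cgroup info in current kthread context for later
 * retrieval.
 */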
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

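/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */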
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif