/*
 * kernel/workqueue.c - generic async execution with shared worker pools
 *
 * A workqueue is an asynchronous process-context execution mechanism:
 * work items queued on a workqueue are executed by worker kthreads which
 * are shared between workqueues through per-CPU and unbound worker_pools.
 * Each pool manages its own concurrency level and automatically grows and
 * shrinks its worker population.  Workqueues created with WQ_MEM_RECLAIM
 * additionally have a dedicated rescuer thread which guarantees forward
 * progress when new workers cannot be created under memory pressure.
 *
 * See Documentation/core-api/workqueue.rst for more details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any cpu.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};

/*
 * Structure fields below follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * X: During normal operation, modification requires pool->lock and should
 *    be done only from the local cpu.  Either disabling preemption on the
 *    local cpu or grabbing pool->lock is enough for read access.  If
 *    POOL_DISASSOCIATED is set, it's identical to L.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 */

struct worker_pool {
	spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* X: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * The current concurrency level.  As it's likely to be accessed
	 * from other CPUs during try_to_wake_up(), put it in a separate
	 * cacheline.
	 */
	atomic_t		nr_running ____cacheline_aligned_in_smp;

	/*
	 * Destruction of the pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
} ____cacheline_aligned_in_smp;

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work->data are used for flags and the remaining high bits point to
 * the pwq; thus, pwqs need to be aligned at two's power of the number of
 * flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	/*
	 * Release of unbound pwqs is punted to system_wq.  See put_pwq()
	 * for details.  pool_workqueue itself is also RCU protected so
	 * that the first pwq can be determined without grabbing wq->mutex.
	 */
	struct work_struct	unbound_release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pools through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.  This is used
	 * to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
};

static struct kmem_cache *pwq_cache;

static cpumask_var_t *wq_numa_possible_cpumask;
					/* possible CPUs of each node */

static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */

static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */

/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on local execution.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

341struct workqueue_struct *system_wq __read_mostly;
342EXPORT_SYMBOL(system_wq);
343struct workqueue_struct *system_highpri_wq __read_mostly;
344EXPORT_SYMBOL_GPL(system_highpri_wq);
345struct workqueue_struct *system_long_wq __read_mostly;
346EXPORT_SYMBOL_GPL(system_long_wq);
347struct workqueue_struct *system_unbound_wq __read_mostly;
348EXPORT_SYMBOL_GPL(system_unbound_wq);
349struct workqueue_struct *system_freezable_wq __read_mostly;
350EXPORT_SYMBOL_GPL(system_freezable_wq);
351struct workqueue_struct *system_power_efficient_wq __read_mostly;
352EXPORT_SYMBOL_GPL(system_power_efficient_wq);
353struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
354EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
355
356static int worker_thread(void *__worker);
357static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
358
359#define CREATE_TRACE_POINTS
360#include <trace/events/workqueue.h>
361
362#define assert_rcu_or_pool_mutex() \
363 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
364 !lockdep_is_held(&wq_pool_mutex), \
365 "RCU or wq_pool_mutex should be held")
366
367#define assert_rcu_or_wq_mutex(wq) \
368 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
369 !lockdep_is_held(&wq->mutex), \
370 "RCU or wq->mutex should be held")
371
372#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
373 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
374 !lockdep_is_held(&wq->mutex) && \
375 !lockdep_is_held(&wq_pool_mutex), \
376 "RCU, wq->mutex or wq_pool_mutex should be held")
377
378#define for_each_cpu_worker_pool(pool, cpu) \
379 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
380 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
381 (pool)++)
382
/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
395#define for_each_pool(pool, pi) \
396 idr_for_each_entry(&worker_pool_idr, pool, pi) \
397 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
398 else
399
/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
410#define for_each_pool_worker(worker, pool) \
411 list_for_each_entry((worker), &(pool)->workers, node) \
412 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
413 else
414
/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
427#define for_each_pwq(pwq, wq) \
428 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
429 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
430 else
431
432#ifdef CONFIG_DEBUG_OBJECTS_WORK
433
434static struct debug_obj_descr work_debug_descr;
435
436static void *work_debug_hint(void *addr)
437{
438 return ((struct work_struct *) addr)->func;
439}
440
441static bool work_is_static_object(void *addr)
442{
443 struct work_struct *work = addr;
444
445 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
446}
447
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
452static bool work_fixup_init(void *addr, enum debug_obj_state state)
453{
454 struct work_struct *work = addr;
455
456 switch (state) {
457 case ODEBUG_STATE_ACTIVE:
458 cancel_work_sync(work);
459 debug_object_init(work, &work_debug_descr);
460 return true;
461 default:
462 return false;
463 }
464}
465
/*
 * fixup_free is called when:
 * - an active object is freed
 */
470static bool work_fixup_free(void *addr, enum debug_obj_state state)
471{
472 struct work_struct *work = addr;
473
474 switch (state) {
475 case ODEBUG_STATE_ACTIVE:
476 cancel_work_sync(work);
477 debug_object_free(work, &work_debug_descr);
478 return true;
479 default:
480 return false;
481 }
482}
483
484static struct debug_obj_descr work_debug_descr = {
485 .name = "work_struct",
486 .debug_hint = work_debug_hint,
487 .is_static_object = work_is_static_object,
488 .fixup_init = work_fixup_init,
489 .fixup_free = work_fixup_free,
490};
491
492static inline void debug_work_activate(struct work_struct *work)
493{
494 debug_object_activate(work, &work_debug_descr);
495}
496
497static inline void debug_work_deactivate(struct work_struct *work)
498{
499 debug_object_deactivate(work, &work_debug_descr);
500}
501
502void __init_work(struct work_struct *work, int onstack)
503{
504 if (onstack)
505 debug_object_init_on_stack(work, &work_debug_descr);
506 else
507 debug_object_init(work, &work_debug_descr);
508}
509EXPORT_SYMBOL_GPL(__init_work);
510
511void destroy_work_on_stack(struct work_struct *work)
512{
513 debug_object_free(work, &work_debug_descr);
514}
515EXPORT_SYMBOL_GPL(destroy_work_on_stack);
516
517void destroy_delayed_work_on_stack(struct delayed_work *work)
518{
519 destroy_timer_on_stack(&work->timer);
520 debug_object_free(&work->work, &work_debug_descr);
521}
522EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
523
524#else
525static inline void debug_work_activate(struct work_struct *work) { }
526static inline void debug_work_deactivate(struct work_struct *work) { }
527#endif
528
529
530
531
532
533
534
535
536static int worker_pool_assign_id(struct worker_pool *pool)
537{
538 int ret;
539
540 lockdep_assert_held(&wq_pool_mutex);
541
542 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
543 GFP_KERNEL);
544 if (ret >= 0) {
545 pool->id = ret;
546 return 0;
547 }
548 return ret;
549}
550
551
552
553
554
555
556
557
558
559
560
561
562
563static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
564 int node)
565{
566 assert_rcu_or_wq_mutex_or_pool_mutex(wq);
567
568
569
570
571
572
573
574 if (unlikely(node == NUMA_NO_NODE))
575 return wq->dfl_pwq;
576
577 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
578}
579
580static unsigned int work_color_to_flags(int color)
581{
582 return color << WORK_STRUCT_COLOR_SHIFT;
583}
584
585static int get_work_color(struct work_struct *work)
586{
587 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
588 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
589}
590
591static int work_next_color(int color)
592{
593 return (color + 1) % WORK_NR_COLORS;
594}
595
/*
 * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's
 * data contain the pointer to the queued pwq.  Once execution starts, the
 * flag is cleared and the high bits contain OFFQ flags and the pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  The pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  The pwq
 * is available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is set while a work item is being cancel_sync'd so
 * that concurrent attempts to grab its PENDING bit back off with -ENOENT
 * until canceling is finished.
 */
616static inline void set_work_data(struct work_struct *work, unsigned long data,
617 unsigned long flags)
618{
619 WARN_ON_ONCE(!work_pending(work));
620 atomic_long_set(&work->data, data | flags | work_static(work));
621}
622
623static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
624 unsigned long extra_flags)
625{
626 set_work_data(work, (unsigned long)pwq,
627 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
628}
629
630static void set_work_pool_and_keep_pending(struct work_struct *work,
631 int pool_id)
632{
633 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
634 WORK_STRUCT_PENDING);
635}
636
637static void set_work_pool_and_clear_pending(struct work_struct *work,
638 int pool_id)
639{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
	/*
	 * The following mb guarantees that the previous clear of the
	 * PENDING bit will not be reordered with any speculative LOADS or
	 * STORES from work->current_func, which is executed afterwards.
	 * Such reordering can lead to a missed execution on an attempt to
	 * queue the work concurrently.  See the comment in
	 * try_to_grab_pending() about its matching CPU barrier.
	 */
	smp_mb();
677}
678
679static void clear_work_data(struct work_struct *work)
680{
681 smp_wmb();
682 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
683}
684
685static struct pool_workqueue *get_work_pwq(struct work_struct *work)
686{
687 unsigned long data = atomic_long_read(&work->data);
688
689 if (data & WORK_STRUCT_PWQ)
690 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
691 else
692 return NULL;
693}
694
/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
710static struct worker_pool *get_work_pool(struct work_struct *work)
711{
712 unsigned long data = atomic_long_read(&work->data);
713 int pool_id;
714
715 assert_rcu_or_pool_mutex();
716
717 if (data & WORK_STRUCT_PWQ)
718 return ((struct pool_workqueue *)
719 (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
720
721 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
722 if (pool_id == WORK_OFFQ_POOL_NONE)
723 return NULL;
724
725 return idr_find(&worker_pool_idr, pool_id);
726}
727
728
729
730
731
732
733
734
735static int get_work_pool_id(struct work_struct *work)
736{
737 unsigned long data = atomic_long_read(&work->data);
738
739 if (data & WORK_STRUCT_PWQ)
740 return ((struct pool_workqueue *)
741 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
742
743 return data >> WORK_OFFQ_POOL_SHIFT;
744}
745
746static void mark_work_canceling(struct work_struct *work)
747{
748 unsigned long pool_id = get_work_pool_id(work);
749
750 pool_id <<= WORK_OFFQ_POOL_SHIFT;
751 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
752}
753
754static bool work_is_canceling(struct work_struct *work)
755{
756 unsigned long data = atomic_long_read(&work->data);
757
758 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
759}
760
/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */
767static bool __need_more_worker(struct worker_pool *pool)
768{
769 return !atomic_read(&pool->nr_running);
770}
771
772
773
774
775
776
777
778
779
780static bool need_more_worker(struct worker_pool *pool)
781{
782 return !list_empty(&pool->worklist) && __need_more_worker(pool);
783}
784
785
786static bool may_start_working(struct worker_pool *pool)
787{
788 return pool->nr_idle;
789}
790
791
792static bool keep_working(struct worker_pool *pool)
793{
794 return !list_empty(&pool->worklist) &&
795 atomic_read(&pool->nr_running) <= 1;
796}
797
798
799static bool need_to_create_worker(struct worker_pool *pool)
800{
801 return need_more_worker(pool) && !may_start_working(pool);
802}
803
804
805static bool too_many_workers(struct worker_pool *pool)
806{
807 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
808 int nr_idle = pool->nr_idle + managing;
809 int nr_busy = pool->nr_workers - nr_idle;
810
811 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
812}
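
/*
 * Worked example of the check above: with MAX_IDLE_WORKERS_RATIO == 4, a
 * pool with nr_idle == 3 (one beyond the two always-kept idle workers) and
 * nr_busy == 4 gives (3 - 2) * 4 == 4 >= 4, so the pool has too many
 * workers and the idle reaper may start; with nr_busy == 5 it would not.
 */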
813
814
815
816
817
818
819static struct worker *first_idle_worker(struct worker_pool *pool)
820{
821 if (unlikely(list_empty(&pool->idle_list)))
822 return NULL;
823
824 return list_first_entry(&pool->idle_list, struct worker, entry);
825}
826
827
828
829
830
831
832
833
834
835
836static void wake_up_worker(struct worker_pool *pool)
837{
838 struct worker *worker = first_idle_worker(pool);
839
840 if (likely(worker))
841 wake_up_process(worker->task);
842}
843
844
845
846
847
848
849
850void wq_worker_running(struct task_struct *task)
851{
852 struct worker *worker = kthread_data(task);
853
854 if (!worker->sleeping)
855 return;
856 if (!(worker->flags & WORKER_NOT_RUNNING))
857 atomic_inc(&worker->pool->nr_running);
858 worker->sleeping = 0;
859}
860
861
862
863
864
865
866
867
868void wq_worker_sleeping(struct task_struct *task)
869{
870 struct worker *next, *worker = kthread_data(task);
871 struct worker_pool *pool;
872
873
874
875
876
877
878 if (worker->flags & WORKER_NOT_RUNNING)
879 return;
880
881 pool = worker->pool;
882
883 if (WARN_ON_ONCE(worker->sleeping))
884 return;
885
886 worker->sleeping = 1;
887 spin_lock_irq(&pool->lock);
888
889
890
891
892
893
894
895
896
897
898
899
900 if (atomic_dec_and_test(&pool->nr_running) &&
901 !list_empty(&pool->worklist)) {
902 next = first_idle_worker(pool);
903 if (next)
904 wake_up_process(next->task);
905 }
906 spin_unlock_irq(&pool->lock);
907}
908
933work_func_t wq_worker_last_func(struct task_struct *task)
934{
935 struct worker *worker = kthread_data(task);
936
937 return worker->last_func;
938}
939
940
941
942
943
944
945
946
947
948
949
950static inline void worker_set_flags(struct worker *worker, unsigned int flags)
951{
952 struct worker_pool *pool = worker->pool;
953
954 WARN_ON_ONCE(worker->task != current);
955
956
957 if ((flags & WORKER_NOT_RUNNING) &&
958 !(worker->flags & WORKER_NOT_RUNNING)) {
959 atomic_dec(&pool->nr_running);
960 }
961
962 worker->flags |= flags;
963}
964
965
966
967
968
969
970
971
972
973
974
975static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
976{
977 struct worker_pool *pool = worker->pool;
978 unsigned int oflags = worker->flags;
979
980 WARN_ON_ONCE(worker->task != current);
981
982 worker->flags &= ~flags;
983
984
985
986
987
988
989 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
990 if (!(worker->flags & WORKER_NOT_RUNNING))
991 atomic_inc(&pool->nr_running);
992}
993
/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current_work must point to @work and its current_func
 * must match @work's function; this guards against false positives when
 * a freed work item is recycled for a different function at the same
 * address, and is what provides the non-reentrancy guarantee that at
 * most one worker per pool executes a given work item at a time.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to the worker which is executing @work if found, %NULL
 * otherwise.
 */
1027static struct worker *find_worker_executing_work(struct worker_pool *pool,
1028 struct work_struct *work)
1029{
1030 struct worker *worker;
1031
1032 hash_for_each_possible(pool->busy_hash, worker, hentry,
1033 (unsigned long)work)
1034 if (worker->current_work == work &&
1035 worker->current_func == work->func)
1036 return worker;
1037
1038 return NULL;
1039}
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058static void move_linked_works(struct work_struct *work, struct list_head *head,
1059 struct work_struct **nextp)
1060{
1061 struct work_struct *n;
1062
1063
1064
1065
1066
1067 list_for_each_entry_safe_from(work, n, NULL, entry) {
1068 list_move_tail(&work->entry, head);
1069 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1070 break;
1071 }
1072
1073
1074
1075
1076
1077
1078 if (nextp)
1079 *nextp = n;
1080}
1081
1082
1083
1084
1085
1086
1087
1088
1089static void get_pwq(struct pool_workqueue *pwq)
1090{
1091 lockdep_assert_held(&pwq->pool->lock);
1092 WARN_ON_ONCE(pwq->refcnt <= 0);
1093 pwq->refcnt++;
1094}
1095
1096
1097
1098
1099
1100
1101
1102
1103static void put_pwq(struct pool_workqueue *pwq)
1104{
1105 lockdep_assert_held(&pwq->pool->lock);
1106 if (likely(--pwq->refcnt))
1107 return;
1108 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1109 return;
1110
1111
1112
1113
1114
1115
1116
1117
1118 schedule_work(&pwq->unbound_release_work);
1119}
1120
1121
1122
1123
1124
1125
1126
1127static void put_pwq_unlocked(struct pool_workqueue *pwq)
1128{
1129 if (pwq) {
1130
1131
1132
1133
1134 spin_lock_irq(&pwq->pool->lock);
1135 put_pwq(pwq);
1136 spin_unlock_irq(&pwq->pool->lock);
1137 }
1138}
1139
1140static void pwq_activate_delayed_work(struct work_struct *work)
1141{
1142 struct pool_workqueue *pwq = get_work_pwq(work);
1143
1144 trace_workqueue_activate_work(work);
1145 if (list_empty(&pwq->pool->worklist))
1146 pwq->pool->watchdog_ts = jiffies;
1147 move_linked_works(work, &pwq->pool->worklist, NULL);
1148 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1149 pwq->nr_active++;
1150}
1151
1152static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1153{
1154 struct work_struct *work = list_first_entry(&pwq->delayed_works,
1155 struct work_struct, entry);
1156
1157 pwq_activate_delayed_work(work);
1158}
1159
1171static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1172{
1173
1174 if (color == WORK_NO_COLOR)
1175 goto out_put;
1176
1177 pwq->nr_in_flight[color]--;
1178
1179 pwq->nr_active--;
1180 if (!list_empty(&pwq->delayed_works)) {
1181
1182 if (pwq->nr_active < pwq->max_active)
1183 pwq_activate_first_delayed(pwq);
1184 }
1185
1186
1187 if (likely(pwq->flush_color != color))
1188 goto out_put;
1189
1190
1191 if (pwq->nr_in_flight[color])
1192 goto out_put;
1193
1194
1195 pwq->flush_color = -1;
1196
1197
1198
1199
1200
1201 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1202 complete(&pwq->wq->first_flusher->done);
1203out_put:
1204 put_pwq(pwq);
1205}
1206
/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab the PENDING bit of @work.  This function can handle @work
 * in any stable state - idle, on timer or on worklist.
 *
 * Return:
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for a finite short period of
 * time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
1234static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1235 unsigned long *flags)
1236{
1237 struct worker_pool *pool;
1238 struct pool_workqueue *pwq;
1239
1240 local_irq_save(*flags);
1241
1242
1243 if (is_dwork) {
1244 struct delayed_work *dwork = to_delayed_work(work);
1245
1246
1247
1248
1249
1250
1251 if (likely(del_timer(&dwork->timer)))
1252 return 1;
1253 }
1254
1255
1256 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1257 return 0;
1258
1259 rcu_read_lock();
1260
1261
1262
1263
1264 pool = get_work_pool(work);
1265 if (!pool)
1266 goto fail;
1267
1268 spin_lock(&pool->lock);
1269
1270
1271
1272
1273
1274
1275
1276
1277 pwq = get_work_pwq(work);
1278 if (pwq && pwq->pool == pool) {
1279 debug_work_deactivate(work);
1280
1281
1282
1283
1284
1285
1286
1287
1288 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1289 pwq_activate_delayed_work(work);
1290
1291 list_del_init(&work->entry);
1292 pwq_dec_nr_in_flight(pwq, get_work_color(work));
1293
1294
1295 set_work_pool_and_keep_pending(work, pool->id);
1296
1297 spin_unlock(&pool->lock);
1298 rcu_read_unlock();
1299 return 1;
1300 }
1301 spin_unlock(&pool->lock);
1302fail:
1303 rcu_read_unlock();
1304 local_irq_restore(*flags);
1305 if (work_is_canceling(work))
1306 return -ENOENT;
1307 cpu_relax();
1308 return -EAGAIN;
1309}
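
/*
 * Illustrative sketch of the calling convention above, mirroring the
 * internal callers (__cancel_work() and mod_delayed_work_on() below):
 * busy-retry on -EAGAIN, and remember that on >= 0 return irqs are left
 * disabled and must be restored from the saved flags.
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0) {
 *		// PENDING is now owned by this context
 *		local_irq_restore(flags);
 *	}
 */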
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1325 struct list_head *head, unsigned int extra_flags)
1326{
1327 struct worker_pool *pool = pwq->pool;
1328
1329
1330 set_work_pwq(work, pwq, extra_flags);
1331 list_add_tail(&work->entry, head);
1332 get_pwq(pwq);
1333
1334
1335
1336
1337
1338
1339 smp_mb();
1340
1341 if (__need_more_worker(pool))
1342 wake_up_worker(pool);
1343}
1344
1345
1346
1347
1348
1349static bool is_chained_work(struct workqueue_struct *wq)
1350{
1351 struct worker *worker;
1352
1353 worker = current_wq_worker();
1354
1355
1356
1357
1358 return worker && worker->current_pwq->wq == wq;
1359}
1360
1361
1362
1363
1364
1365
1366static int wq_select_unbound_cpu(int cpu)
1367{
1368 static bool printed_dbg_warning;
1369 int new_cpu;
1370
1371 if (likely(!wq_debug_force_rr_cpu)) {
1372 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1373 return cpu;
1374 } else if (!printed_dbg_warning) {
1375 pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1376 printed_dbg_warning = true;
1377 }
1378
1379 if (cpumask_empty(wq_unbound_cpumask))
1380 return cpu;
1381
1382 new_cpu = __this_cpu_read(wq_rr_cpu_last);
1383 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1384 if (unlikely(new_cpu >= nr_cpu_ids)) {
1385 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1386 if (unlikely(new_cpu >= nr_cpu_ids))
1387 return cpu;
1388 }
1389 __this_cpu_write(wq_rr_cpu_last, new_cpu);
1390
1391 return new_cpu;
1392}
1393
1394static void __queue_work(int cpu, struct workqueue_struct *wq,
1395 struct work_struct *work)
1396{
1397 struct pool_workqueue *pwq;
1398 struct worker_pool *last_pool;
1399 struct list_head *worklist;
1400 unsigned int work_flags;
1401 unsigned int req_cpu = cpu;
1402
1403
1404
1405
1406
1407
1408
1409 lockdep_assert_irqs_disabled();
1410
1411 debug_work_activate(work);
1412
1413
1414 if (unlikely(wq->flags & __WQ_DRAINING) &&
1415 WARN_ON_ONCE(!is_chained_work(wq)))
1416 return;
1417 rcu_read_lock();
1418retry:
1419 if (req_cpu == WORK_CPU_UNBOUND)
1420 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1421
1422
1423 if (!(wq->flags & WQ_UNBOUND))
1424 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1425 else
1426 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1427
1428
1429
1430
1431
1432
1433 last_pool = get_work_pool(work);
1434 if (last_pool && last_pool != pwq->pool) {
1435 struct worker *worker;
1436
1437 spin_lock(&last_pool->lock);
1438
1439 worker = find_worker_executing_work(last_pool, work);
1440
1441 if (worker && worker->current_pwq->wq == wq) {
1442 pwq = worker->current_pwq;
1443 } else {
1444
1445 spin_unlock(&last_pool->lock);
1446 spin_lock(&pwq->pool->lock);
1447 }
1448 } else {
1449 spin_lock(&pwq->pool->lock);
1450 }
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460 if (unlikely(!pwq->refcnt)) {
1461 if (wq->flags & WQ_UNBOUND) {
1462 spin_unlock(&pwq->pool->lock);
1463 cpu_relax();
1464 goto retry;
1465 }
1466
1467 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1468 wq->name, cpu);
1469 }
1470
1471
1472 trace_workqueue_queue_work(req_cpu, pwq, work);
1473
1474 if (WARN_ON(!list_empty(&work->entry)))
1475 goto out;
1476
1477 pwq->nr_in_flight[pwq->work_color]++;
1478 work_flags = work_color_to_flags(pwq->work_color);
1479
1480 if (likely(pwq->nr_active < pwq->max_active)) {
1481 trace_workqueue_activate_work(work);
1482 pwq->nr_active++;
1483 worklist = &pwq->pool->worklist;
1484 if (list_empty(worklist))
1485 pwq->pool->watchdog_ts = jiffies;
1486 } else {
1487 work_flags |= WORK_STRUCT_DELAYED;
1488 worklist = &pwq->delayed_works;
1489 }
1490
1491 insert_work(pwq, work, worklist, work_flags);
1492
1493out:
1494 spin_unlock(&pwq->pool->lock);
1495 rcu_read_unlock();
1496}
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509bool queue_work_on(int cpu, struct workqueue_struct *wq,
1510 struct work_struct *work)
1511{
1512 bool ret = false;
1513 unsigned long flags;
1514
1515 local_irq_save(flags);
1516
1517 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1518 __queue_work(cpu, wq, work);
1519 ret = true;
1520 }
1521
1522 local_irq_restore(flags);
1523 return ret;
1524}
1525EXPORT_SYMBOL(queue_work_on);
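
/*
 * Illustrative usage sketch (example_work_fn and example_work are
 * hypothetical names, not part of this file): queue a work item on a
 * specific CPU of the system per-cpu workqueue.
 *
 *	static void example_work_fn(struct work_struct *work)
 *	{
 *		pr_info("example work ran on CPU %d\n", smp_processor_id());
 *	}
 *	static DECLARE_WORK(example_work, example_work_fn);
 *
 *	// returns false if the work item was already pending
 *	if (!queue_work_on(0, system_wq, &example_work))
 *		pr_debug("example_work already pending\n");
 */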
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536static int workqueue_select_cpu_near(int node)
1537{
1538 int cpu;
1539
1540
1541 if (!wq_numa_enabled)
1542 return WORK_CPU_UNBOUND;
1543
1544
1545 if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1546 return WORK_CPU_UNBOUND;
1547
1548
1549 cpu = raw_smp_processor_id();
1550 if (node == cpu_to_node(cpu))
1551 return cpu;
1552
1553
1554 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1555
1556
1557 return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1558}
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580bool queue_work_node(int node, struct workqueue_struct *wq,
1581 struct work_struct *work)
1582{
1583 unsigned long flags;
1584 bool ret = false;
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1596
1597 local_irq_save(flags);
1598
1599 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1600 int cpu = workqueue_select_cpu_near(node);
1601
1602 __queue_work(cpu, wq, work);
1603 ret = true;
1604 }
1605
1606 local_irq_restore(flags);
1607 return ret;
1608}
1609EXPORT_SYMBOL_GPL(queue_work_node);
1610
1611void delayed_work_timer_fn(struct timer_list *t)
1612{
1613 struct delayed_work *dwork = from_timer(dwork, t, timer);
1614
1615
1616 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1617}
1618EXPORT_SYMBOL(delayed_work_timer_fn);
1619
1620static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1621 struct delayed_work *dwork, unsigned long delay)
1622{
1623 struct timer_list *timer = &dwork->timer;
1624 struct work_struct *work = &dwork->work;
1625
1626 WARN_ON_ONCE(!wq);
1627 WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1628 WARN_ON_ONCE(timer_pending(timer));
1629 WARN_ON_ONCE(!list_empty(&work->entry));
1630
1631
1632
1633
1634
1635
1636
1637 if (!delay) {
1638 __queue_work(cpu, wq, &dwork->work);
1639 return;
1640 }
1641
1642 dwork->wq = wq;
1643 dwork->cpu = cpu;
1644 timer->expires = jiffies + delay;
1645
1646 if (unlikely(cpu != WORK_CPU_UNBOUND))
1647 add_timer_on(timer, cpu);
1648 else
1649 add_timer(timer);
1650}
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1664 struct delayed_work *dwork, unsigned long delay)
1665{
1666 struct work_struct *work = &dwork->work;
1667 bool ret = false;
1668 unsigned long flags;
1669
1670
1671 local_irq_save(flags);
1672
1673 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1674 __queue_delayed_work(cpu, wq, dwork, delay);
1675 ret = true;
1676 }
1677
1678 local_irq_restore(flags);
1679 return ret;
1680}
1681EXPORT_SYMBOL(queue_delayed_work_on);
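
/*
 * Illustrative usage sketch (poll_fn and poll_dwork are hypothetical
 * names): arm a delayed work item so it runs about one second from now.
 *
 *	static void poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);
 *
 *	// returns false if the delayed work was already pending
 *	queue_delayed_work(system_wq, &poll_dwork, msecs_to_jiffies(1000));
 *	// or, to target a specific CPU's pool:
 *	queue_delayed_work_on(0, system_wq, &poll_dwork, HZ);
 */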
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1702 struct delayed_work *dwork, unsigned long delay)
1703{
1704 unsigned long flags;
1705 int ret;
1706
1707 do {
1708 ret = try_to_grab_pending(&dwork->work, true, &flags);
1709 } while (unlikely(ret == -EAGAIN));
1710
1711 if (likely(ret >= 0)) {
1712 __queue_delayed_work(cpu, wq, dwork, delay);
1713 local_irq_restore(flags);
1714 }
1715
1716
1717 return ret;
1718}
1719EXPORT_SYMBOL_GPL(mod_delayed_work_on);
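
/*
 * Illustrative sketch of the debounce/re-arm pattern this enables
 * (hypothetical names): each call pushes execution of flush_fn back to
 * half a second from now, whether or not it was already queued.
 *
 *	static void flush_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(flush_dwork, flush_fn);
 *
 *	void note_activity(void)
 *	{
 *		mod_delayed_work(system_wq, &flush_dwork, HZ / 2);
 *	}
 */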
1720
1721static void rcu_work_rcufn(struct rcu_head *rcu)
1722{
1723 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
1724
1725
1726 local_irq_disable();
1727 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1728 local_irq_enable();
1729}
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1742{
1743 struct work_struct *work = &rwork->work;
1744
1745 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1746 rwork->wq = wq;
1747 call_rcu(&rwork->rcu, rcu_work_rcufn);
1748 return true;
1749 }
1750
1751 return false;
1752}
1753EXPORT_SYMBOL(queue_rcu_work);
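
/*
 * Illustrative usage sketch (struct foo and foo_free_workfn are
 * hypothetical): run process-context cleanup only after an RCU grace
 * period, e.g. to free an object readers may still be dereferencing.
 *
 *	struct foo {
 *		struct rcu_work rwork;
 *		// ... payload ...
 *	};
 *
 *	static void foo_free_workfn(struct work_struct *work)
 *	{
 *		struct foo *foo = container_of(to_rcu_work(work),
 *					       struct foo, rwork);
 *		kfree(foo);
 *	}
 *
 *	// after unpublishing @foo from all RCU-protected structures:
 *	INIT_RCU_WORK(&foo->rwork, foo_free_workfn);
 *	queue_rcu_work(system_wq, &foo->rwork);
 */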
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765static void worker_enter_idle(struct worker *worker)
1766{
1767 struct worker_pool *pool = worker->pool;
1768
1769 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1770 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1771 (worker->hentry.next || worker->hentry.pprev)))
1772 return;
1773
1774
1775 worker->flags |= WORKER_IDLE;
1776 pool->nr_idle++;
1777 worker->last_active = jiffies;
1778
1779
1780 list_add(&worker->entry, &pool->idle_list);
1781
1782 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1783 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1784
1785
1786
1787
1788
1789
1790
1791 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1792 pool->nr_workers == pool->nr_idle &&
1793 atomic_read(&pool->nr_running));
1794}
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805static void worker_leave_idle(struct worker *worker)
1806{
1807 struct worker_pool *pool = worker->pool;
1808
1809 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1810 return;
1811 worker_clr_flags(worker, WORKER_IDLE);
1812 pool->nr_idle--;
1813 list_del_init(&worker->entry);
1814}
1815
1816static struct worker *alloc_worker(int node)
1817{
1818 struct worker *worker;
1819
1820 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1821 if (worker) {
1822 INIT_LIST_HEAD(&worker->entry);
1823 INIT_LIST_HEAD(&worker->scheduled);
1824 INIT_LIST_HEAD(&worker->node);
1825
1826 worker->flags = WORKER_PREP;
1827 }
1828 return worker;
1829}
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840static void worker_attach_to_pool(struct worker *worker,
1841 struct worker_pool *pool)
1842{
1843 mutex_lock(&wq_pool_attach_mutex);
1844
1845
1846
1847
1848
1849 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1850
1851
1852
1853
1854
1855
1856 if (pool->flags & POOL_DISASSOCIATED)
1857 worker->flags |= WORKER_UNBOUND;
1858
1859 list_add_tail(&worker->node, &pool->workers);
1860 worker->pool = pool;
1861
1862 mutex_unlock(&wq_pool_attach_mutex);
1863}
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873static void worker_detach_from_pool(struct worker *worker)
1874{
1875 struct worker_pool *pool = worker->pool;
1876 struct completion *detach_completion = NULL;
1877
1878 mutex_lock(&wq_pool_attach_mutex);
1879
1880 list_del(&worker->node);
1881 worker->pool = NULL;
1882
1883 if (list_empty(&pool->workers))
1884 detach_completion = pool->detach_completion;
1885 mutex_unlock(&wq_pool_attach_mutex);
1886
1887
1888 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1889
1890 if (detach_completion)
1891 complete(detach_completion);
1892}
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906static struct worker *create_worker(struct worker_pool *pool)
1907{
1908 struct worker *worker = NULL;
1909 int id = -1;
1910 char id_buf[16];
1911
1912
1913 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1914 if (id < 0)
1915 goto fail;
1916
1917 worker = alloc_worker(pool->node);
1918 if (!worker)
1919 goto fail;
1920
1921 worker->id = id;
1922
1923 if (pool->cpu >= 0)
1924 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1925 pool->attrs->nice < 0 ? "H" : "");
1926 else
1927 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1928
1929 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1930 "kworker/%s", id_buf);
1931 if (IS_ERR(worker->task))
1932 goto fail;
1933
1934 set_user_nice(worker->task, pool->attrs->nice);
1935 kthread_bind_mask(worker->task, pool->attrs->cpumask);
1936
1937
1938 worker_attach_to_pool(worker, pool);
1939
1940
1941 spin_lock_irq(&pool->lock);
1942 worker->pool->nr_workers++;
1943 worker_enter_idle(worker);
1944 wake_up_process(worker->task);
1945 spin_unlock_irq(&pool->lock);
1946
1947 return worker;
1948
1949fail:
1950 if (id >= 0)
1951 ida_simple_remove(&pool->worker_ida, id);
1952 kfree(worker);
1953 return NULL;
1954}
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966static void destroy_worker(struct worker *worker)
1967{
1968 struct worker_pool *pool = worker->pool;
1969
1970 lockdep_assert_held(&pool->lock);
1971
1972
1973 if (WARN_ON(worker->current_work) ||
1974 WARN_ON(!list_empty(&worker->scheduled)) ||
1975 WARN_ON(!(worker->flags & WORKER_IDLE)))
1976 return;
1977
1978 pool->nr_workers--;
1979 pool->nr_idle--;
1980
1981 list_del_init(&worker->entry);
1982 worker->flags |= WORKER_DIE;
1983 wake_up_process(worker->task);
1984}
1985
1986static void idle_worker_timeout(struct timer_list *t)
1987{
1988 struct worker_pool *pool = from_timer(pool, t, idle_timer);
1989
1990 spin_lock_irq(&pool->lock);
1991
1992 while (too_many_workers(pool)) {
1993 struct worker *worker;
1994 unsigned long expires;
1995
1996
1997 worker = list_entry(pool->idle_list.prev, struct worker, entry);
1998 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1999
2000 if (time_before(jiffies, expires)) {
2001 mod_timer(&pool->idle_timer, expires);
2002 break;
2003 }
2004
2005 destroy_worker(worker);
2006 }
2007
2008 spin_unlock_irq(&pool->lock);
2009}
2010
2011static void send_mayday(struct work_struct *work)
2012{
2013 struct pool_workqueue *pwq = get_work_pwq(work);
2014 struct workqueue_struct *wq = pwq->wq;
2015
2016 lockdep_assert_held(&wq_mayday_lock);
2017
2018 if (!wq->rescuer)
2019 return;
2020
2021
2022 if (list_empty(&pwq->mayday_node)) {
2023
2024
2025
2026
2027
2028 get_pwq(pwq);
2029 list_add_tail(&pwq->mayday_node, &wq->maydays);
2030 wake_up_process(wq->rescuer->task);
2031 }
2032}
2033
2034static void pool_mayday_timeout(struct timer_list *t)
2035{
2036 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2037 struct work_struct *work;
2038
2039 spin_lock_irq(&pool->lock);
2040 spin_lock(&wq_mayday_lock);
2041
2042 if (need_to_create_worker(pool)) {
2043
2044
2045
2046
2047
2048
2049 list_for_each_entry(work, &pool->worklist, entry)
2050 send_mayday(work);
2051 }
2052
2053 spin_unlock(&wq_mayday_lock);
2054 spin_unlock_irq(&pool->lock);
2055
2056 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2057}
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077static void maybe_create_worker(struct worker_pool *pool)
2078__releases(&pool->lock)
2079__acquires(&pool->lock)
2080{
2081restart:
2082 spin_unlock_irq(&pool->lock);
2083
2084
2085 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2086
2087 while (true) {
2088 if (create_worker(pool) || !need_to_create_worker(pool))
2089 break;
2090
2091 schedule_timeout_interruptible(CREATE_COOLDOWN);
2092
2093 if (!need_to_create_worker(pool))
2094 break;
2095 }
2096
2097 del_timer_sync(&pool->mayday_timer);
2098 spin_lock_irq(&pool->lock);
2099
2100
2101
2102
2103
2104 if (need_to_create_worker(pool))
2105 goto restart;
2106}
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130static bool manage_workers(struct worker *worker)
2131{
2132 struct worker_pool *pool = worker->pool;
2133
2134 if (pool->flags & POOL_MANAGER_ACTIVE)
2135 return false;
2136
2137 pool->flags |= POOL_MANAGER_ACTIVE;
2138 pool->manager = worker;
2139
2140 maybe_create_worker(pool);
2141
2142 pool->manager = NULL;
2143 pool->flags &= ~POOL_MANAGER_ACTIVE;
2144 wake_up(&wq_manager_wait);
2145 return true;
2146}
2147
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock) which is released and regrabbed.
 */
2162static void process_one_work(struct worker *worker, struct work_struct *work)
2163__releases(&pool->lock)
2164__acquires(&pool->lock)
2165{
2166 struct pool_workqueue *pwq = get_work_pwq(work);
2167 struct worker_pool *pool = worker->pool;
2168 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2169 int work_color;
2170 struct worker *collision;
2171#ifdef CONFIG_LOCKDEP
2172
2173
2174
2175
2176
2177
2178
2179 struct lockdep_map lockdep_map;
2180
2181 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2182#endif
2183
2184 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2185 raw_smp_processor_id() != pool->cpu);
2186
2187
2188
2189
2190
2191
2192
2193 collision = find_worker_executing_work(pool, work);
2194 if (unlikely(collision)) {
2195 move_linked_works(work, &collision->scheduled, NULL);
2196 return;
2197 }
2198
2199
2200 debug_work_deactivate(work);
2201 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2202 worker->current_work = work;
2203 worker->current_func = work->func;
2204 worker->current_pwq = pwq;
2205 work_color = get_work_color(work);
2206
2207
2208
2209
2210
2211 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2212
2213 list_del_init(&work->entry);
2214
2215
2216
2217
2218
2219
2220
2221 if (unlikely(cpu_intensive))
2222 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2223
2224
2225
2226
2227
2228
2229
2230
2231 if (need_more_worker(pool))
2232 wake_up_worker(pool);
2233
2234
2235
2236
2237
2238
2239
2240 set_work_pool_and_clear_pending(work, pool->id);
2241
2242 spin_unlock_irq(&pool->lock);
2243
2244 lock_map_acquire(&pwq->wq->lockdep_map);
2245 lock_map_acquire(&lockdep_map);
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267 lockdep_invariant_state(true);
2268 trace_workqueue_execute_start(work);
2269 worker->current_func(work);
2270
2271
2272
2273
2274 trace_workqueue_execute_end(work);
2275 lock_map_release(&lockdep_map);
2276 lock_map_release(&pwq->wq->lockdep_map);
2277
2278 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2279 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2280 " last function: %ps\n",
2281 current->comm, preempt_count(), task_pid_nr(current),
2282 worker->current_func);
2283 debug_show_held_locks(current);
2284 dump_stack();
2285 }
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295 cond_resched();
2296
2297 spin_lock_irq(&pool->lock);
2298
2299
2300 if (unlikely(cpu_intensive))
2301 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2302
2303
2304 worker->last_func = worker->current_func;
2305
2306
2307 hash_del(&worker->hentry);
2308 worker->current_work = NULL;
2309 worker->current_func = NULL;
2310 worker->current_pwq = NULL;
2311 pwq_dec_nr_in_flight(pwq, work_color);
2312}
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326static void process_scheduled_works(struct worker *worker)
2327{
2328 while (!list_empty(&worker->scheduled)) {
2329 struct work_struct *work = list_first_entry(&worker->scheduled,
2330 struct work_struct, entry);
2331 process_one_work(worker, work);
2332 }
2333}
2334
2335static void set_pf_worker(bool val)
2336{
2337 mutex_lock(&wq_pool_attach_mutex);
2338 if (val)
2339 current->flags |= PF_WQ_WORKER;
2340 else
2341 current->flags &= ~PF_WQ_WORKER;
2342 mutex_unlock(&wq_pool_attach_mutex);
2343}
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357static int worker_thread(void *__worker)
2358{
2359 struct worker *worker = __worker;
2360 struct worker_pool *pool = worker->pool;
2361
2362
2363 set_pf_worker(true);
2364woke_up:
2365 spin_lock_irq(&pool->lock);
2366
2367
2368 if (unlikely(worker->flags & WORKER_DIE)) {
2369 spin_unlock_irq(&pool->lock);
2370 WARN_ON_ONCE(!list_empty(&worker->entry));
2371 set_pf_worker(false);
2372
2373 set_task_comm(worker->task, "kworker/dying");
2374 ida_simple_remove(&pool->worker_ida, worker->id);
2375 worker_detach_from_pool(worker);
2376 kfree(worker);
2377 return 0;
2378 }
2379
2380 worker_leave_idle(worker);
2381recheck:
2382
2383 if (!need_more_worker(pool))
2384 goto sleep;
2385
2386
2387 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2388 goto recheck;
2389
2390
2391
2392
2393
2394
2395 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2396
2397
2398
2399
2400
2401
2402
2403
2404 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2405
2406 do {
2407 struct work_struct *work =
2408 list_first_entry(&pool->worklist,
2409 struct work_struct, entry);
2410
2411 pool->watchdog_ts = jiffies;
2412
2413 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2414
2415 process_one_work(worker, work);
2416 if (unlikely(!list_empty(&worker->scheduled)))
2417 process_scheduled_works(worker);
2418 } else {
2419 move_linked_works(work, &worker->scheduled, NULL);
2420 process_scheduled_works(worker);
2421 }
2422 } while (keep_working(pool));
2423
2424 worker_set_flags(worker, WORKER_PREP);
2425sleep:
2426
2427
2428
2429
2430
2431
2432
2433 worker_enter_idle(worker);
2434 __set_current_state(TASK_IDLE);
2435 spin_unlock_irq(&pool->lock);
2436 schedule();
2437 goto woke_up;
2438}
2439
/**
 * rescuer_thread - the rescuer thread function
 * @__rescuer: self
 *
 * Workqueue rescuer thread function.  There's one rescuer for each
 * workqueue which has WQ_MEM_RECLAIM set.
 *
 * Regular work processing on a pool may block trying to create a new
 * worker which uses a GFP_KERNEL allocation which has a slight chance of
 * developing into deadlock if some works currently on the same queue
 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
 * the problem the rescuer solves.
 *
 * When such a condition is possible, the pool summons rescuers of all
 * workqueues which have works queued on the pool and lets them process
 * those works so that forward progress can be guaranteed.
 *
 * This should happen rarely.
 *
 * Return: 0
 */
2461static int rescuer_thread(void *__rescuer)
2462{
2463 struct worker *rescuer = __rescuer;
2464 struct workqueue_struct *wq = rescuer->rescue_wq;
2465 struct list_head *scheduled = &rescuer->scheduled;
2466 bool should_stop;
2467
2468 set_user_nice(current, RESCUER_NICE_LEVEL);
2469
2470
2471
2472
2473
2474 set_pf_worker(true);
2475repeat:
2476 set_current_state(TASK_IDLE);
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486 should_stop = kthread_should_stop();
2487
2488
2489 spin_lock_irq(&wq_mayday_lock);
2490
2491 while (!list_empty(&wq->maydays)) {
2492 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2493 struct pool_workqueue, mayday_node);
2494 struct worker_pool *pool = pwq->pool;
2495 struct work_struct *work, *n;
2496 bool first = true;
2497
2498 __set_current_state(TASK_RUNNING);
2499 list_del_init(&pwq->mayday_node);
2500
2501 spin_unlock_irq(&wq_mayday_lock);
2502
2503 worker_attach_to_pool(rescuer, pool);
2504
2505 spin_lock_irq(&pool->lock);
2506
2507
2508
2509
2510
2511 WARN_ON_ONCE(!list_empty(scheduled));
2512 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2513 if (get_work_pwq(work) == pwq) {
2514 if (first)
2515 pool->watchdog_ts = jiffies;
2516 move_linked_works(work, scheduled, &n);
2517 }
2518 first = false;
2519 }
2520
2521 if (!list_empty(scheduled)) {
2522 process_scheduled_works(rescuer);
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533 if (need_to_create_worker(pool)) {
2534 spin_lock(&wq_mayday_lock);
2535 get_pwq(pwq);
2536 list_move_tail(&pwq->mayday_node, &wq->maydays);
2537 spin_unlock(&wq_mayday_lock);
2538 }
2539 }
2540
2541
2542
2543
2544
2545 put_pwq(pwq);
2546
2547
2548
2549
2550
2551
2552 if (need_more_worker(pool))
2553 wake_up_worker(pool);
2554
2555 spin_unlock_irq(&pool->lock);
2556
2557 worker_detach_from_pool(rescuer);
2558
2559 spin_lock_irq(&wq_mayday_lock);
2560 }
2561
2562 spin_unlock_irq(&wq_mayday_lock);
2563
2564 if (should_stop) {
2565 __set_current_state(TASK_RUNNING);
2566 set_pf_worker(false);
2567 return 0;
2568 }
2569
2570
2571 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2572 schedule();
2573 goto repeat;
2574}
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587static void check_flush_dependency(struct workqueue_struct *target_wq,
2588 struct work_struct *target_work)
2589{
2590 work_func_t target_func = target_work ? target_work->func : NULL;
2591 struct worker *worker;
2592
2593 if (target_wq->flags & WQ_MEM_RECLAIM)
2594 return;
2595
2596 worker = current_wq_worker();
2597
2598 WARN_ONCE(current->flags & PF_MEMALLOC,
2599 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2600 current->pid, current->comm, target_wq->name, target_func);
2601 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2602 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2603 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2604 worker->current_pwq->wq->name, worker->current_func,
2605 target_wq->name, target_func);
2606}
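
/*
 * Illustrative example of the dependency rule enforced above (workqueue
 * names are hypothetical): a WQ_MEM_RECLAIM workqueue has a rescuer and is
 * expected to make forward progress under memory pressure, so work running
 * on it must never wait for a !WQ_MEM_RECLAIM workqueue.
 *
 *	struct workqueue_struct *io_wq =
 *		alloc_workqueue("example_io", WQ_MEM_RECLAIM, 0);
 *	struct workqueue_struct *misc_wq =
 *		alloc_workqueue("example_misc", 0, 0);
 *
 *	// A work item running on io_wq calling flush_workqueue(misc_wq),
 *	// or flush_work() on one of misc_wq's items, trips the WARN_ONCE
 *	// above; flushing another WQ_MEM_RECLAIM workqueue is fine.
 */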
2607
2608struct wq_barrier {
2609 struct work_struct work;
2610 struct completion done;
2611 struct task_struct *task;
2612};
2613
2614static void wq_barrier_func(struct work_struct *work)
2615{
2616 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2617 complete(&barr->done);
2618}
2619
2644static void insert_wq_barrier(struct pool_workqueue *pwq,
2645 struct wq_barrier *barr,
2646 struct work_struct *target, struct worker *worker)
2647{
2648 struct list_head *head;
2649 unsigned int linked = 0;
2650
2651
2652
2653
2654
2655
2656
2657 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2658 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2659
2660 init_completion_map(&barr->done, &target->lockdep_map);
2661
2662 barr->task = current;
2663
2664
2665
2666
2667
2668 if (worker)
2669 head = worker->scheduled.next;
2670 else {
2671 unsigned long *bits = work_data_bits(target);
2672
2673 head = target->entry.next;
2674
2675 linked = *bits & WORK_STRUCT_LINKED;
2676 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2677 }
2678
2679 debug_work_activate(&barr->work);
2680 insert_work(pwq, &barr->work, head,
2681 work_color_to_flags(WORK_NO_COLOR) | linked);
2682}
2683
2715static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2716 int flush_color, int work_color)
2717{
2718 bool wait = false;
2719 struct pool_workqueue *pwq;
2720
2721 if (flush_color >= 0) {
2722 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2723 atomic_set(&wq->nr_pwqs_to_flush, 1);
2724 }
2725
2726 for_each_pwq(pwq, wq) {
2727 struct worker_pool *pool = pwq->pool;
2728
2729 spin_lock_irq(&pool->lock);
2730
2731 if (flush_color >= 0) {
2732 WARN_ON_ONCE(pwq->flush_color != -1);
2733
2734 if (pwq->nr_in_flight[flush_color]) {
2735 pwq->flush_color = flush_color;
2736 atomic_inc(&wq->nr_pwqs_to_flush);
2737 wait = true;
2738 }
2739 }
2740
2741 if (work_color >= 0) {
2742 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2743 pwq->work_color = work_color;
2744 }
2745
2746 spin_unlock_irq(&pool->lock);
2747 }
2748
2749 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2750 complete(&wq->first_flusher->done);
2751
2752 return wait;
2753}
2754
2755
2756
2757
2758
2759
2760
2761
2762void flush_workqueue(struct workqueue_struct *wq)
2763{
2764 struct wq_flusher this_flusher = {
2765 .list = LIST_HEAD_INIT(this_flusher.list),
2766 .flush_color = -1,
2767 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2768 };
2769 int next_color;
2770
2771 if (WARN_ON(!wq_online))
2772 return;
2773
2774 lock_map_acquire(&wq->lockdep_map);
2775 lock_map_release(&wq->lockdep_map);
2776
2777 mutex_lock(&wq->mutex);
2778
2779
2780
2781
2782 next_color = work_next_color(wq->work_color);
2783
2784 if (next_color != wq->flush_color) {
2785
2786
2787
2788
2789
2790 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2791 this_flusher.flush_color = wq->work_color;
2792 wq->work_color = next_color;
2793
2794 if (!wq->first_flusher) {
2795
2796 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2797
2798 wq->first_flusher = &this_flusher;
2799
2800 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2801 wq->work_color)) {
2802
2803 wq->flush_color = next_color;
2804 wq->first_flusher = NULL;
2805 goto out_unlock;
2806 }
2807 } else {
2808
2809 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2810 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2811 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2812 }
2813 } else {
2814
2815
2816
2817
2818
2819 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2820 }
2821
2822 check_flush_dependency(wq, NULL);
2823
2824 mutex_unlock(&wq->mutex);
2825
2826 wait_for_completion(&this_flusher.done);
2827
2828
2829
2830
2831
2832
2833
2834 if (wq->first_flusher != &this_flusher)
2835 return;
2836
2837 mutex_lock(&wq->mutex);
2838
2839
2840 if (wq->first_flusher != &this_flusher)
2841 goto out_unlock;
2842
2843 wq->first_flusher = NULL;
2844
2845 WARN_ON_ONCE(!list_empty(&this_flusher.list));
2846 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2847
2848 while (true) {
2849 struct wq_flusher *next, *tmp;
2850
2851
2852 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2853 if (next->flush_color != wq->flush_color)
2854 break;
2855 list_del_init(&next->list);
2856 complete(&next->done);
2857 }
2858
2859 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2860 wq->flush_color != work_next_color(wq->work_color));
2861
2862
2863 wq->flush_color = work_next_color(wq->flush_color);
2864
2865
2866 if (!list_empty(&wq->flusher_overflow)) {
2867
2868
2869
2870
2871
2872
2873 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2874 tmp->flush_color = wq->work_color;
2875
2876 wq->work_color = work_next_color(wq->work_color);
2877
2878 list_splice_tail_init(&wq->flusher_overflow,
2879 &wq->flusher_queue);
2880 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2881 }
2882
2883 if (list_empty(&wq->flusher_queue)) {
2884 WARN_ON_ONCE(wq->flush_color != wq->work_color);
2885 break;
2886 }
2887
2888
2889
2890
2891
2892 WARN_ON_ONCE(wq->flush_color == wq->work_color);
2893 WARN_ON_ONCE(wq->flush_color != next->flush_color);
2894
2895 list_del_init(&next->list);
2896 wq->first_flusher = next;
2897
2898 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2899 break;
2900
2901
2902
2903
2904
2905 wq->first_flusher = NULL;
2906 }
2907
2908out_unlock:
2909 mutex_unlock(&wq->mutex);
2910}
2911EXPORT_SYMBOL(flush_workqueue);
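
/*
 * Illustrative usage sketch (dev->wq and the work items are hypothetical):
 * wait for everything queued so far on a driver's private workqueue before
 * tearing state down.  Work items queued after flush_workqueue() starts
 * are not waited for.
 *
 *	queue_work(dev->wq, &dev->stats_work);
 *	queue_work(dev->wq, &dev->irq_work);
 *	flush_workqueue(dev->wq);	// both items have finished here
 */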
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924void drain_workqueue(struct workqueue_struct *wq)
2925{
2926 unsigned int flush_cnt = 0;
2927 struct pool_workqueue *pwq;
2928
2929
2930
2931
2932
2933
2934 mutex_lock(&wq->mutex);
2935 if (!wq->nr_drainers++)
2936 wq->flags |= __WQ_DRAINING;
2937 mutex_unlock(&wq->mutex);
2938reflush:
2939 flush_workqueue(wq);
2940
2941 mutex_lock(&wq->mutex);
2942
2943 for_each_pwq(pwq, wq) {
2944 bool drained;
2945
2946 spin_lock_irq(&pwq->pool->lock);
2947 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2948 spin_unlock_irq(&pwq->pool->lock);
2949
2950 if (drained)
2951 continue;
2952
2953 if (++flush_cnt == 10 ||
2954 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2955 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2956 wq->name, flush_cnt);
2957
2958 mutex_unlock(&wq->mutex);
2959 goto reflush;
2960 }
2961
2962 if (!--wq->nr_drainers)
2963 wq->flags &= ~__WQ_DRAINING;
2964 mutex_unlock(&wq->mutex);
2965}
2966EXPORT_SYMBOL_GPL(drain_workqueue);
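
/*
 * Illustrative sketch (example_wq and stopping are hypothetical names):
 * drain_workqueue() is what destroy_workqueue() relies on and is the right
 * tool when work items may requeue themselves - a plain flush only waits
 * for one "generation" of work.
 *
 *	static bool stopping;
 *
 *	static void selfarm_fn(struct work_struct *work)
 *	{
 *		if (!READ_ONCE(stopping))
 *			queue_work(example_wq, work);	// requeues itself
 *	}
 *
 *	// teardown:
 *	WRITE_ONCE(stopping, true);
 *	drain_workqueue(example_wq);	// returns once requeueing has ceased
 *	destroy_workqueue(example_wq);
 */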
2967
2968static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2969 bool from_cancel)
2970{
2971 struct worker *worker = NULL;
2972 struct worker_pool *pool;
2973 struct pool_workqueue *pwq;
2974
2975 might_sleep();
2976
2977 rcu_read_lock();
2978 pool = get_work_pool(work);
2979 if (!pool) {
2980 rcu_read_unlock();
2981 return false;
2982 }
2983
2984 spin_lock_irq(&pool->lock);
2985
2986 pwq = get_work_pwq(work);
2987 if (pwq) {
2988 if (unlikely(pwq->pool != pool))
2989 goto already_gone;
2990 } else {
2991 worker = find_worker_executing_work(pool, work);
2992 if (!worker)
2993 goto already_gone;
2994 pwq = worker->current_pwq;
2995 }
2996
2997 check_flush_dependency(pwq->wq, work);
2998
2999 insert_wq_barrier(pwq, barr, work, worker);
3000 spin_unlock_irq(&pool->lock);
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011 if (!from_cancel &&
3012 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3013 lock_map_acquire(&pwq->wq->lockdep_map);
3014 lock_map_release(&pwq->wq->lockdep_map);
3015 }
3016 rcu_read_unlock();
3017 return true;
3018already_gone:
3019 spin_unlock_irq(&pool->lock);
3020 rcu_read_unlock();
3021 return false;
3022}
3023
3024static bool __flush_work(struct work_struct *work, bool from_cancel)
3025{
3026 struct wq_barrier barr;
3027
3028 if (WARN_ON(!wq_online))
3029 return false;
3030
3031 if (WARN_ON(!work->func))
3032 return false;
3033
3034 if (!from_cancel) {
3035 lock_map_acquire(&work->lockdep_map);
3036 lock_map_release(&work->lockdep_map);
3037 }
3038
3039 if (start_flush_work(work, &barr, from_cancel)) {
3040 wait_for_completion(&barr.done);
3041 destroy_work_on_stack(&barr.work);
3042 return true;
3043 } else {
3044 return false;
3045 }
3046}
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059bool flush_work(struct work_struct *work)
3060{
3061 return __flush_work(work, false);
3062}
3063EXPORT_SYMBOL_GPL(flush_work);
3064
3065struct cwt_wait {
3066 wait_queue_entry_t wait;
3067 struct work_struct *work;
3068};
3069
3070static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3071{
3072 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3073
3074 if (cwait->work != key)
3075 return 0;
3076 return autoremove_wake_function(wait, mode, sync, key);
3077}
3078
3079static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3080{
3081 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3082 unsigned long flags;
3083 int ret;
3084
3085 do {
3086 ret = try_to_grab_pending(work, is_dwork, &flags);
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103 if (unlikely(ret == -ENOENT)) {
3104 struct cwt_wait cwait;
3105
3106 init_wait(&cwait.wait);
3107 cwait.wait.func = cwt_wakefn;
3108 cwait.work = work;
3109
3110 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3111 TASK_UNINTERRUPTIBLE);
3112 if (work_is_canceling(work))
3113 schedule();
3114 finish_wait(&cancel_waitq, &cwait.wait);
3115 }
3116 } while (unlikely(ret < 0));
3117
3118
3119 mark_work_canceling(work);
3120 local_irq_restore(flags);
3121
3122
3123
3124
3125
3126 if (wq_online)
3127 __flush_work(work, true);
3128
3129 clear_work_data(work);
3130
3131
3132
3133
3134
3135
3136 smp_mb();
3137 if (waitqueue_active(&cancel_waitq))
3138 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3139
3140 return ret;
3141}
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161bool cancel_work_sync(struct work_struct *work)
3162{
3163 return __cancel_work_timer(work, false);
3164}
3165EXPORT_SYMBOL_GPL(cancel_work_sync);
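/*
 * Usage sketch (illustrative only; names are hypothetical): typical driver
 * teardown cancels any pending or running work before freeing the object
 * that embeds it.
 *
 *	static void frob_remove(struct frob_dev *fd)
 *	{
 *		// After this returns, refresh_work is neither pending nor
 *		// running anywhere, so it is safe to free @fd.
 *		cancel_work_sync(&fd->refresh_work);
 *		kfree(fd);
 *	}
 */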
3166
/**
 * flush_delayed_work - wait for a dwork to finish executing the last queueing
 * @dwork: the delayed work to flush
 *
 * The delayed timer is cancelled and the pending work is queued for
 * immediate execution.  Like flush_work(), this function only
 * considers the last queueing instance of @dwork.
 *
 * Return:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
3179bool flush_delayed_work(struct delayed_work *dwork)
3180{
3181 local_irq_disable();
3182 if (del_timer_sync(&dwork->timer))
3183 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3184 local_irq_enable();
3185 return flush_work(&dwork->work);
3186}
3187EXPORT_SYMBOL(flush_delayed_work);
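/*
 * Usage sketch (illustrative only; names are hypothetical): force a delayed
 * work to run now instead of waiting for its timer, then wait for it.
 *
 *	struct frob_dev {
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void frob_poll_now(struct frob_dev *fd)
 *	{
 *		// INIT_DELAYED_WORK(&fd->poll_work, frob_poll_fn) was done
 *		// earlier and the work was armed with schedule_delayed_work().
 *		flush_delayed_work(&fd->poll_work);
 *	}
 */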
3188
/**
 * flush_rcu_work - wait for a rwork to finish executing the last queueing
 * @rwork: the rcu work to flush
 *
 * Return:
 * %true if flush_rcu_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
3197bool flush_rcu_work(struct rcu_work *rwork)
3198{
3199 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3200 rcu_barrier();
3201 flush_work(&rwork->work);
3202 return true;
3203 } else {
3204 return flush_work(&rwork->work);
3205 }
3206}
3207EXPORT_SYMBOL(flush_rcu_work);
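/*
 * Usage sketch (illustrative only; names are hypothetical): an rcu_work
 * queued with queue_rcu_work() runs only after an RCU grace period has
 * elapsed; flush_rcu_work() waits for both the grace period and the
 * work function.
 *
 *	struct frob_cache {
 *		struct rcu_work reap_work;
 *	};
 *
 *	static void frob_cache_sync_reap(struct frob_cache *fc)
 *	{
 *		queue_rcu_work(system_wq, &fc->reap_work);
 *		flush_rcu_work(&fc->reap_work);
 *	}
 */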
3208
3209static bool __cancel_work(struct work_struct *work, bool is_dwork)
3210{
3211 unsigned long flags;
3212 int ret;
3213
3214 do {
3215 ret = try_to_grab_pending(work, is_dwork, &flags);
3216 } while (unlikely(ret == -EAGAIN));
3217
3218 if (unlikely(ret < 0))
3219 return false;
3220
3221 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3222 local_irq_restore(flags);
3223 return ret;
3224}
3225
/**
 * cancel_delayed_work - cancel a delayed work
 * @dwork: delayed_work to cancel
 *
 * Kill off a pending delayed_work.
 *
 * Return: %true if @dwork was pending and canceled; %false if it wasn't
 * pending.
 *
 * Note:
 * The work callback function may still be running on return, unless
 * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
 * use cancel_delayed_work_sync() to wait on it.
 *
 * This function is safe to call from any context including IRQ handler.
 */
3242bool cancel_delayed_work(struct delayed_work *dwork)
3243{
3244 return __cancel_work(&dwork->work, true);
3245}
3246EXPORT_SYMBOL(cancel_delayed_work);
3247
/**
 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work to cancel
 *
 * This is cancel_work_sync() for delayed works.
 *
 * Return:
 * %true if @dwork was pending, %false otherwise.
 */
3257bool cancel_delayed_work_sync(struct delayed_work *dwork)
3258{
3259 return __cancel_work_timer(&dwork->work, true);
3260}
3261EXPORT_SYMBOL(cancel_delayed_work_sync);
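/*
 * Usage sketch (illustrative only; names are hypothetical): rearm a polling
 * delayed work and stop it synchronously on teardown.
 *
 *	static void frob_start_polling(struct frob_dev *fd)
 *	{
 *		mod_delayed_work(system_wq, &fd->poll_work, HZ);
 *	}
 *
 *	static void frob_stop_polling(struct frob_dev *fd)
 *	{
 *		// cancel_delayed_work() would suffice from IRQ context, but
 *		// only the _sync variant guarantees the callback is no
 *		// longer running when it returns.
 *		cancel_delayed_work_sync(&fd->poll_work);
 *	}
 */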
3262
/**
 * schedule_on_each_cpu - execute a function synchronously on each online CPU
 * @func: the function to call
 *
 * schedule_on_each_cpu() executes @func on each online CPU using the
 * system workqueue and blocks until all CPUs have completed.
 * schedule_on_each_cpu() is very slow.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
3274int schedule_on_each_cpu(work_func_t func)
3275{
3276 int cpu;
3277 struct work_struct __percpu *works;
3278
3279 works = alloc_percpu(struct work_struct);
3280 if (!works)
3281 return -ENOMEM;
3282
3283 get_online_cpus();
3284
3285 for_each_online_cpu(cpu) {
3286 struct work_struct *work = per_cpu_ptr(works, cpu);
3287
3288 INIT_WORK(work, func);
3289 schedule_work_on(cpu, work);
3290 }
3291
3292 for_each_online_cpu(cpu)
3293 flush_work(per_cpu_ptr(works, cpu));
3294
3295 put_online_cpus();
3296 free_percpu(works);
3297 return 0;
3298}
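/*
 * Usage sketch (illustrative only; the function names are hypothetical):
 * run a per-CPU drain on every online CPU and wait for all of them.
 *
 *	static void frob_drain_cpu(struct work_struct *unused)
 *	{
 *		// runs on the CPU the work item was queued on
 *	}
 *
 *	static int frob_drain_all(void)
 *	{
 *		return schedule_on_each_cpu(frob_drain_cpu);
 *	}
 */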
3299
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Return:	0 - function was executed
 *		1 - function was scheduled for execution
 */
3312int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3313{
3314 if (!in_interrupt()) {
3315 fn(&ew->work);
3316 return 0;
3317 }
3318
3319 INIT_WORK(&ew->work, fn);
3320 schedule_work(&ew->work);
3321
3322 return 1;
3323}
3324EXPORT_SYMBOL_GPL(execute_in_process_context);
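/*
 * Usage sketch (illustrative only; names are hypothetical): @ew must stay
 * valid until the callback has run, so it is typically embedded in a
 * longer-lived object rather than placed on the stack.
 *
 *	struct frob_dev {
 *		struct execute_work release_ew;
 *	};
 *
 *	static void frob_release(struct work_struct *work)
 *	{
 *		struct frob_dev *fd =
 *			container_of(work, struct frob_dev, release_ew.work);
 *		kfree(fd);
 *	}
 *
 *	static void frob_put_final(struct frob_dev *fd)
 *	{
 *		// runs frob_release() immediately unless called from IRQ
 *		execute_in_process_context(frob_release, &fd->release_ew);
 *	}
 */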
3325
3326
3327
3328
3329
3330
3331
3332void free_workqueue_attrs(struct workqueue_attrs *attrs)
3333{
3334 if (attrs) {
3335 free_cpumask_var(attrs->cpumask);
3336 kfree(attrs);
3337 }
3338}
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348struct workqueue_attrs *alloc_workqueue_attrs(void)
3349{
3350 struct workqueue_attrs *attrs;
3351
3352 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3353 if (!attrs)
3354 goto fail;
3355 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3356 goto fail;
3357
3358 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3359 return attrs;
3360fail:
3361 free_workqueue_attrs(attrs);
3362 return NULL;
3363}
3364
3365static void copy_workqueue_attrs(struct workqueue_attrs *to,
3366 const struct workqueue_attrs *from)
3367{
3368 to->nice = from->nice;
3369 cpumask_copy(to->cpumask, from->cpumask);
3370
3371
3372
3373
3374
3375 to->no_numa = from->no_numa;
3376}
3377
3378
3379static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3380{
3381 u32 hash = 0;
3382
3383 hash = jhash_1word(attrs->nice, hash);
3384 hash = jhash(cpumask_bits(attrs->cpumask),
3385 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3386 return hash;
3387}
3388
3389
3390static bool wqattrs_equal(const struct workqueue_attrs *a,
3391 const struct workqueue_attrs *b)
3392{
3393 if (a->nice != b->nice)
3394 return false;
3395 if (!cpumask_equal(a->cpumask, b->cpumask))
3396 return false;
3397 return true;
3398}
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410static int init_worker_pool(struct worker_pool *pool)
3411{
3412 spin_lock_init(&pool->lock);
3413 pool->id = -1;
3414 pool->cpu = -1;
3415 pool->node = NUMA_NO_NODE;
3416 pool->flags |= POOL_DISASSOCIATED;
3417 pool->watchdog_ts = jiffies;
3418 INIT_LIST_HEAD(&pool->worklist);
3419 INIT_LIST_HEAD(&pool->idle_list);
3420 hash_init(pool->busy_hash);
3421
3422 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3423
3424 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3425
3426 INIT_LIST_HEAD(&pool->workers);
3427
3428 ida_init(&pool->worker_ida);
3429 INIT_HLIST_NODE(&pool->hash_node);
3430 pool->refcnt = 1;
3431
3432
3433 pool->attrs = alloc_workqueue_attrs();
3434 if (!pool->attrs)
3435 return -ENOMEM;
3436 return 0;
3437}
3438
3439#ifdef CONFIG_LOCKDEP
3440static void wq_init_lockdep(struct workqueue_struct *wq)
3441{
3442 char *lock_name;
3443
3444 lockdep_register_key(&wq->key);
3445 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3446 if (!lock_name)
3447 lock_name = wq->name;
3448
3449 wq->lock_name = lock_name;
3450 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3451}
3452
3453static void wq_unregister_lockdep(struct workqueue_struct *wq)
3454{
3455 lockdep_unregister_key(&wq->key);
3456}
3457
3458static void wq_free_lockdep(struct workqueue_struct *wq)
3459{
3460 if (wq->lock_name != wq->name)
3461 kfree(wq->lock_name);
3462}
3463#else
3464static void wq_init_lockdep(struct workqueue_struct *wq)
3465{
3466}
3467
3468static void wq_unregister_lockdep(struct workqueue_struct *wq)
3469{
3470}
3471
3472static void wq_free_lockdep(struct workqueue_struct *wq)
3473{
3474}
3475#endif
3476
3477static void rcu_free_wq(struct rcu_head *rcu)
3478{
3479 struct workqueue_struct *wq =
3480 container_of(rcu, struct workqueue_struct, rcu);
3481
3482 wq_free_lockdep(wq);
3483
3484 if (!(wq->flags & WQ_UNBOUND))
3485 free_percpu(wq->cpu_pwqs);
3486 else
3487 free_workqueue_attrs(wq->unbound_attrs);
3488
3489 kfree(wq->rescuer);
3490 kfree(wq);
3491}
3492
3493static void rcu_free_pool(struct rcu_head *rcu)
3494{
3495 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3496
3497 ida_destroy(&pool->worker_ida);
3498 free_workqueue_attrs(pool->attrs);
3499 kfree(pool);
3500}
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513static void put_unbound_pool(struct worker_pool *pool)
3514{
3515 DECLARE_COMPLETION_ONSTACK(detach_completion);
3516 struct worker *worker;
3517
3518 lockdep_assert_held(&wq_pool_mutex);
3519
3520 if (--pool->refcnt)
3521 return;
3522
3523
3524 if (WARN_ON(!(pool->cpu < 0)) ||
3525 WARN_ON(!list_empty(&pool->worklist)))
3526 return;
3527
3528
3529 if (pool->id >= 0)
3530 idr_remove(&worker_pool_idr, pool->id);
3531 hash_del(&pool->hash_node);
3532
3533
3534
3535
3536
3537
3538 spin_lock_irq(&pool->lock);
3539 wait_event_lock_irq(wq_manager_wait,
3540 !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3541 pool->flags |= POOL_MANAGER_ACTIVE;
3542
3543 while ((worker = first_idle_worker(pool)))
3544 destroy_worker(worker);
3545 WARN_ON(pool->nr_workers || pool->nr_idle);
3546 spin_unlock_irq(&pool->lock);
3547
3548 mutex_lock(&wq_pool_attach_mutex);
3549 if (!list_empty(&pool->workers))
3550 pool->detach_completion = &detach_completion;
3551 mutex_unlock(&wq_pool_attach_mutex);
3552
3553 if (pool->detach_completion)
3554 wait_for_completion(pool->detach_completion);
3555
3556
3557 del_timer_sync(&pool->idle_timer);
3558 del_timer_sync(&pool->mayday_timer);
3559
3560
3561 call_rcu(&pool->rcu, rcu_free_pool);
3562}
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3579{
3580 u32 hash = wqattrs_hash(attrs);
3581 struct worker_pool *pool;
3582 int node;
3583 int target_node = NUMA_NO_NODE;
3584
3585 lockdep_assert_held(&wq_pool_mutex);
3586
3587
3588 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3589 if (wqattrs_equal(pool->attrs, attrs)) {
3590 pool->refcnt++;
3591 return pool;
3592 }
3593 }
3594
3595
3596 if (wq_numa_enabled) {
3597 for_each_node(node) {
3598 if (cpumask_subset(attrs->cpumask,
3599 wq_numa_possible_cpumask[node])) {
3600 target_node = node;
3601 break;
3602 }
3603 }
3604 }
3605
3606
3607 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3608 if (!pool || init_worker_pool(pool) < 0)
3609 goto fail;
3610
3611 lockdep_set_subclass(&pool->lock, 1);
3612 copy_workqueue_attrs(pool->attrs, attrs);
3613 pool->node = target_node;
3614
3615
3616
3617
3618
3619 pool->attrs->no_numa = false;
3620
3621 if (worker_pool_assign_id(pool) < 0)
3622 goto fail;
3623
3624
3625 if (wq_online && !create_worker(pool))
3626 goto fail;
3627
3628
3629 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3630
3631 return pool;
3632fail:
3633 if (pool)
3634 put_unbound_pool(pool);
3635 return NULL;
3636}
3637
3638static void rcu_free_pwq(struct rcu_head *rcu)
3639{
3640 kmem_cache_free(pwq_cache,
3641 container_of(rcu, struct pool_workqueue, rcu));
3642}
3643
3644
3645
3646
3647
3648static void pwq_unbound_release_workfn(struct work_struct *work)
3649{
3650 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3651 unbound_release_work);
3652 struct workqueue_struct *wq = pwq->wq;
3653 struct worker_pool *pool = pwq->pool;
3654 bool is_last;
3655
3656 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3657 return;
3658
3659 mutex_lock(&wq->mutex);
3660 list_del_rcu(&pwq->pwqs_node);
3661 is_last = list_empty(&wq->pwqs);
3662 mutex_unlock(&wq->mutex);
3663
3664 mutex_lock(&wq_pool_mutex);
3665 put_unbound_pool(pool);
3666 mutex_unlock(&wq_pool_mutex);
3667
3668 call_rcu(&pwq->rcu, rcu_free_pwq);
3669
3670
3671
3672
3673
3674 if (is_last) {
3675 wq_unregister_lockdep(wq);
3676 call_rcu(&wq->rcu, rcu_free_wq);
3677 }
3678}
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3689{
3690 struct workqueue_struct *wq = pwq->wq;
3691 bool freezable = wq->flags & WQ_FREEZABLE;
3692 unsigned long flags;
3693
3694
3695 lockdep_assert_held(&wq->mutex);
3696
3697
3698 if (!freezable && pwq->max_active == wq->saved_max_active)
3699 return;
3700
3701
3702 spin_lock_irqsave(&pwq->pool->lock, flags);
3703
3704
3705
3706
3707
3708
3709 if (!freezable || !workqueue_freezing) {
3710 pwq->max_active = wq->saved_max_active;
3711
3712 while (!list_empty(&pwq->delayed_works) &&
3713 pwq->nr_active < pwq->max_active)
3714 pwq_activate_first_delayed(pwq);
3715
3716
3717
3718
3719
3720 wake_up_worker(pwq->pool);
3721 } else {
3722 pwq->max_active = 0;
3723 }
3724
3725 spin_unlock_irqrestore(&pwq->pool->lock, flags);
3726}
3727
3728
3729static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3730 struct worker_pool *pool)
3731{
3732 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3733
3734 memset(pwq, 0, sizeof(*pwq));
3735
3736 pwq->pool = pool;
3737 pwq->wq = wq;
3738 pwq->flush_color = -1;
3739 pwq->refcnt = 1;
3740 INIT_LIST_HEAD(&pwq->delayed_works);
3741 INIT_LIST_HEAD(&pwq->pwqs_node);
3742 INIT_LIST_HEAD(&pwq->mayday_node);
3743 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3744}
3745
3746
3747static void link_pwq(struct pool_workqueue *pwq)
3748{
3749 struct workqueue_struct *wq = pwq->wq;
3750
3751 lockdep_assert_held(&wq->mutex);
3752
3753
3754 if (!list_empty(&pwq->pwqs_node))
3755 return;
3756
3757
3758 pwq->work_color = wq->work_color;
3759
3760
3761 pwq_adjust_max_active(pwq);
3762
3763
3764 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3765}
3766
3767
3768static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3769 const struct workqueue_attrs *attrs)
3770{
3771 struct worker_pool *pool;
3772 struct pool_workqueue *pwq;
3773
3774 lockdep_assert_held(&wq_pool_mutex);
3775
3776 pool = get_unbound_pool(attrs);
3777 if (!pool)
3778 return NULL;
3779
3780 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3781 if (!pwq) {
3782 put_unbound_pool(pool);
3783 return NULL;
3784 }
3785
3786 init_pwq(pwq, wq, pool);
3787 return pwq;
3788}
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3813 int cpu_going_down, cpumask_t *cpumask)
3814{
3815 if (!wq_numa_enabled || attrs->no_numa)
3816 goto use_dfl;
3817
3818
3819 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3820 if (cpu_going_down >= 0)
3821 cpumask_clear_cpu(cpu_going_down, cpumask);
3822
3823 if (cpumask_empty(cpumask))
3824 goto use_dfl;
3825
3826
3827 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3828
3829 if (cpumask_empty(cpumask)) {
3830 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3831 "possible intersect\n");
3832 return false;
3833 }
3834
3835 return !cpumask_equal(cpumask, attrs->cpumask);
3836
3837use_dfl:
3838 cpumask_copy(cpumask, attrs->cpumask);
3839 return false;
3840}
3841
3842
3843static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3844 int node,
3845 struct pool_workqueue *pwq)
3846{
3847 struct pool_workqueue *old_pwq;
3848
3849 lockdep_assert_held(&wq_pool_mutex);
3850 lockdep_assert_held(&wq->mutex);
3851
3852
3853 link_pwq(pwq);
3854
3855 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3856 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3857 return old_pwq;
3858}
3859
3860
3861struct apply_wqattrs_ctx {
3862 struct workqueue_struct *wq;
3863 struct workqueue_attrs *attrs;
3864 struct list_head list;
3865 struct pool_workqueue *dfl_pwq;
3866 struct pool_workqueue *pwq_tbl[];
3867};
3868
3869
3870static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3871{
3872 if (ctx) {
3873 int node;
3874
3875 for_each_node(node)
3876 put_pwq_unlocked(ctx->pwq_tbl[node]);
3877 put_pwq_unlocked(ctx->dfl_pwq);
3878
3879 free_workqueue_attrs(ctx->attrs);
3880
3881 kfree(ctx);
3882 }
3883}
3884
3885
3886static struct apply_wqattrs_ctx *
3887apply_wqattrs_prepare(struct workqueue_struct *wq,
3888 const struct workqueue_attrs *attrs)
3889{
3890 struct apply_wqattrs_ctx *ctx;
3891 struct workqueue_attrs *new_attrs, *tmp_attrs;
3892 int node;
3893
3894 lockdep_assert_held(&wq_pool_mutex);
3895
3896 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
3897
3898 new_attrs = alloc_workqueue_attrs();
3899 tmp_attrs = alloc_workqueue_attrs();
3900 if (!ctx || !new_attrs || !tmp_attrs)
3901 goto out_free;
3902
3903
3904
3905
3906
3907
3908 copy_workqueue_attrs(new_attrs, attrs);
3909 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3910 if (unlikely(cpumask_empty(new_attrs->cpumask)))
3911 cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3912
3913
3914
3915
3916
3917
3918 copy_workqueue_attrs(tmp_attrs, new_attrs);
3919
3920
3921
3922
3923
3924
3925 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3926 if (!ctx->dfl_pwq)
3927 goto out_free;
3928
3929 for_each_node(node) {
3930 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3931 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3932 if (!ctx->pwq_tbl[node])
3933 goto out_free;
3934 } else {
3935 ctx->dfl_pwq->refcnt++;
3936 ctx->pwq_tbl[node] = ctx->dfl_pwq;
3937 }
3938 }
3939
3940
3941 copy_workqueue_attrs(new_attrs, attrs);
3942 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3943 ctx->attrs = new_attrs;
3944
3945 ctx->wq = wq;
3946 free_workqueue_attrs(tmp_attrs);
3947 return ctx;
3948
3949out_free:
3950 free_workqueue_attrs(tmp_attrs);
3951 free_workqueue_attrs(new_attrs);
3952 apply_wqattrs_cleanup(ctx);
3953 return NULL;
3954}
3955
3956
3957static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3958{
3959 int node;
3960
3961
3962 mutex_lock(&ctx->wq->mutex);
3963
3964 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
3965
3966
3967 for_each_node(node)
3968 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
3969 ctx->pwq_tbl[node]);
3970
3971
3972 link_pwq(ctx->dfl_pwq);
3973 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
3974
3975 mutex_unlock(&ctx->wq->mutex);
3976}
3977
3978static void apply_wqattrs_lock(void)
3979{
3980
3981 get_online_cpus();
3982 mutex_lock(&wq_pool_mutex);
3983}
3984
3985static void apply_wqattrs_unlock(void)
3986{
3987 mutex_unlock(&wq_pool_mutex);
3988 put_online_cpus();
3989}
3990
3991static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3992 const struct workqueue_attrs *attrs)
3993{
3994 struct apply_wqattrs_ctx *ctx;
3995
3996
3997 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3998 return -EINVAL;
3999
4000
4001 if (!list_empty(&wq->pwqs)) {
4002 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4003 return -EINVAL;
4004
4005 wq->flags &= ~__WQ_ORDERED;
4006 }
4007
4008 ctx = apply_wqattrs_prepare(wq, attrs);
4009 if (!ctx)
4010 return -ENOMEM;
4011
4012
4013 apply_wqattrs_commit(ctx);
4014 apply_wqattrs_cleanup(ctx);
4015
4016 return 0;
4017}
4018
/**
 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
 * @wq: the target workqueue
 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
 *
 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
 * machines this function maps a separate pwq to each NUMA node with
 * possible CPUs in @attrs->cpumask so that work items are affine to the
 * NUMA node they were issued on.  Older pwqs are released as in-flight
 * work items finish.  Note that a work item which repeatedly requeues
 * itself back-to-back will stay on its current pwq.
 *
 * Performs GFP_KERNEL allocations.
 *
 * Assumes the caller has CPU hotplug read exclusion, e.g. via
 * get_online_cpus().
 *
 * Return: 0 on success and -errno on failure.
 */
4037int apply_workqueue_attrs(struct workqueue_struct *wq,
4038 const struct workqueue_attrs *attrs)
4039{
4040 int ret;
4041
4042 lockdep_assert_cpus_held();
4043
4044 mutex_lock(&wq_pool_mutex);
4045 ret = apply_workqueue_attrs_locked(wq, attrs);
4046 mutex_unlock(&wq_pool_mutex);
4047
4048 return ret;
4049}
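/*
 * Usage sketch (illustrative only; the function name is hypothetical):
 * restrict an unbound workqueue to a given cpumask.  Error handling is
 * abbreviated.
 *
 *	static int frob_restrict_wq(struct workqueue_struct *wq,
 *				    const struct cpumask *mask)
 *	{
 *		struct workqueue_attrs *attrs;
 *		int ret;
 *
 *		attrs = alloc_workqueue_attrs();
 *		if (!attrs)
 *			return -ENOMEM;
 *		cpumask_copy(attrs->cpumask, mask);
 *
 *		get_online_cpus();
 *		ret = apply_workqueue_attrs(wq, attrs);
 *		put_online_cpus();
 *
 *		free_workqueue_attrs(attrs);
 *		return ret;
 *	}
 */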
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4074 bool online)
4075{
4076 int node = cpu_to_node(cpu);
4077 int cpu_off = online ? -1 : cpu;
4078 struct pool_workqueue *old_pwq = NULL, *pwq;
4079 struct workqueue_attrs *target_attrs;
4080 cpumask_t *cpumask;
4081
4082 lockdep_assert_held(&wq_pool_mutex);
4083
4084 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4085 wq->unbound_attrs->no_numa)
4086 return;
4087
4088
4089
4090
4091
4092
4093 target_attrs = wq_update_unbound_numa_attrs_buf;
4094 cpumask = target_attrs->cpumask;
4095
4096 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4097 pwq = unbound_pwq_by_node(wq, node);
4098
4099
4100
4101
4102
4103
4104
4105 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4106 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4107 return;
4108 } else {
4109 goto use_dfl_pwq;
4110 }
4111
4112
4113 pwq = alloc_unbound_pwq(wq, target_attrs);
4114 if (!pwq) {
4115 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4116 wq->name);
4117 goto use_dfl_pwq;
4118 }
4119
4120
4121 mutex_lock(&wq->mutex);
4122 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4123 goto out_unlock;
4124
4125use_dfl_pwq:
4126 mutex_lock(&wq->mutex);
4127 spin_lock_irq(&wq->dfl_pwq->pool->lock);
4128 get_pwq(wq->dfl_pwq);
4129 spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4130 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4131out_unlock:
4132 mutex_unlock(&wq->mutex);
4133 put_pwq_unlocked(old_pwq);
4134}
4135
4136static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4137{
4138 bool highpri = wq->flags & WQ_HIGHPRI;
4139 int cpu, ret;
4140
4141 if (!(wq->flags & WQ_UNBOUND)) {
4142 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4143 if (!wq->cpu_pwqs)
4144 return -ENOMEM;
4145
4146 for_each_possible_cpu(cpu) {
4147 struct pool_workqueue *pwq =
4148 per_cpu_ptr(wq->cpu_pwqs, cpu);
4149 struct worker_pool *cpu_pools =
4150 per_cpu(cpu_worker_pools, cpu);
4151
4152 init_pwq(pwq, wq, &cpu_pools[highpri]);
4153
4154 mutex_lock(&wq->mutex);
4155 link_pwq(pwq);
4156 mutex_unlock(&wq->mutex);
4157 }
4158 return 0;
4159 }
4160
4161 get_online_cpus();
4162 if (wq->flags & __WQ_ORDERED) {
4163 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4164
4165 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4166 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4167 "ordering guarantee broken for workqueue %s\n", wq->name);
4168 } else {
4169 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4170 }
4171 put_online_cpus();
4172
4173 return ret;
4174}
4175
4176static int wq_clamp_max_active(int max_active, unsigned int flags,
4177 const char *name)
4178{
4179 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4180
4181 if (max_active < 1 || max_active > lim)
4182 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4183 max_active, name, 1, lim);
4184
4185 return clamp_val(max_active, 1, lim);
4186}
4187
4188
4189
4190
4191
4192static int init_rescuer(struct workqueue_struct *wq)
4193{
4194 struct worker *rescuer;
4195 int ret;
4196
4197 if (!(wq->flags & WQ_MEM_RECLAIM))
4198 return 0;
4199
4200 rescuer = alloc_worker(NUMA_NO_NODE);
4201 if (!rescuer)
4202 return -ENOMEM;
4203
4204 rescuer->rescue_wq = wq;
4205 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4206 ret = PTR_ERR_OR_ZERO(rescuer->task);
4207 if (ret) {
4208 kfree(rescuer);
4209 return ret;
4210 }
4211
4212 wq->rescuer = rescuer;
4213 kthread_bind_mask(rescuer->task, cpu_possible_mask);
4214 wake_up_process(rescuer->task);
4215
4216 return 0;
4217}
4218
4219__printf(1, 4)
4220struct workqueue_struct *alloc_workqueue(const char *fmt,
4221 unsigned int flags,
4222 int max_active, ...)
4223{
4224 size_t tbl_size = 0;
4225 va_list args;
4226 struct workqueue_struct *wq;
4227 struct pool_workqueue *pwq;
4228
4229
4230
4231
4232
4233
4234
4235
4236 if ((flags & WQ_UNBOUND) && max_active == 1)
4237 flags |= __WQ_ORDERED;
4238
4239
4240 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4241 flags |= WQ_UNBOUND;
4242
4243
4244 if (flags & WQ_UNBOUND)
4245 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4246
4247 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4248 if (!wq)
4249 return NULL;
4250
4251 if (flags & WQ_UNBOUND) {
4252 wq->unbound_attrs = alloc_workqueue_attrs();
4253 if (!wq->unbound_attrs)
4254 goto err_free_wq;
4255 }
4256
4257 va_start(args, max_active);
4258 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4259 va_end(args);
4260
4261 max_active = max_active ?: WQ_DFL_ACTIVE;
4262 max_active = wq_clamp_max_active(max_active, flags, wq->name);
4263
4264
4265 wq->flags = flags;
4266 wq->saved_max_active = max_active;
4267 mutex_init(&wq->mutex);
4268 atomic_set(&wq->nr_pwqs_to_flush, 0);
4269 INIT_LIST_HEAD(&wq->pwqs);
4270 INIT_LIST_HEAD(&wq->flusher_queue);
4271 INIT_LIST_HEAD(&wq->flusher_overflow);
4272 INIT_LIST_HEAD(&wq->maydays);
4273
4274 wq_init_lockdep(wq);
4275 INIT_LIST_HEAD(&wq->list);
4276
4277 if (alloc_and_link_pwqs(wq) < 0)
4278 goto err_unreg_lockdep;
4279
4280 if (wq_online && init_rescuer(wq) < 0)
4281 goto err_destroy;
4282
4283 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4284 goto err_destroy;
4285
4286
4287
4288
4289
4290
4291 mutex_lock(&wq_pool_mutex);
4292
4293 mutex_lock(&wq->mutex);
4294 for_each_pwq(pwq, wq)
4295 pwq_adjust_max_active(pwq);
4296 mutex_unlock(&wq->mutex);
4297
4298 list_add_tail_rcu(&wq->list, &workqueues);
4299
4300 mutex_unlock(&wq_pool_mutex);
4301
4302 return wq;
4303
4304err_unreg_lockdep:
4305 wq_unregister_lockdep(wq);
4306 wq_free_lockdep(wq);
4307err_free_wq:
4308 free_workqueue_attrs(wq->unbound_attrs);
4309 kfree(wq);
4310 return NULL;
4311err_destroy:
4312 destroy_workqueue(wq);
4313 return NULL;
4314}
4315EXPORT_SYMBOL_GPL(alloc_workqueue);
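/*
 * Usage sketch (illustrative only; the workqueue and function names are
 * made up): a driver whose work items must make forward progress under
 * memory pressure allocates a dedicated WQ_MEM_RECLAIM workqueue instead
 * of relying on system_wq.
 *
 *	static struct workqueue_struct *frob_wq;
 *
 *	static int __init frob_init(void)
 *	{
 *		frob_wq = alloc_workqueue("frob",
 *					  WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 *		if (!frob_wq)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void __exit frob_exit(void)
 *	{
 *		destroy_workqueue(frob_wq);	// drains pending work first
 *	}
 */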
4316
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue.  All work currently pending will be done first.
 */
4323void destroy_workqueue(struct workqueue_struct *wq)
4324{
4325 struct pool_workqueue *pwq;
4326 int node;
4327
4328
4329 drain_workqueue(wq);
4330
4331
4332 mutex_lock(&wq->mutex);
4333 for_each_pwq(pwq, wq) {
4334 int i;
4335
4336 for (i = 0; i < WORK_NR_COLORS; i++) {
4337 if (WARN_ON(pwq->nr_in_flight[i])) {
4338 mutex_unlock(&wq->mutex);
4339 show_workqueue_state();
4340 return;
4341 }
4342 }
4343
4344 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4345 WARN_ON(pwq->nr_active) ||
4346 WARN_ON(!list_empty(&pwq->delayed_works))) {
4347 mutex_unlock(&wq->mutex);
4348 show_workqueue_state();
4349 return;
4350 }
4351 }
4352 mutex_unlock(&wq->mutex);
4353
4354
4355
4356
4357
4358 mutex_lock(&wq_pool_mutex);
4359 list_del_rcu(&wq->list);
4360 mutex_unlock(&wq_pool_mutex);
4361
4362 workqueue_sysfs_unregister(wq);
4363
4364 if (wq->rescuer)
4365 kthread_stop(wq->rescuer->task);
4366
4367 if (!(wq->flags & WQ_UNBOUND)) {
4368 wq_unregister_lockdep(wq);
4369
4370
4371
4372
4373 call_rcu(&wq->rcu, rcu_free_wq);
4374 } else {
4375
4376
4377
4378
4379
4380 for_each_node(node) {
4381 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4382 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4383 put_pwq_unlocked(pwq);
4384 }
4385
4386
4387
4388
4389
4390 pwq = wq->dfl_pwq;
4391 wq->dfl_pwq = NULL;
4392 put_pwq_unlocked(pwq);
4393 }
4394}
4395EXPORT_SYMBOL_GPL(destroy_workqueue);
4396
/**
 * workqueue_set_max_active - adjust max_active of a workqueue
 * @wq: target workqueue
 * @max_active: new max_active value
 *
 * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
 */
4407void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4408{
4409 struct pool_workqueue *pwq;
4410
4411
4412 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4413 return;
4414
4415 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4416
4417 mutex_lock(&wq->mutex);
4418
4419 wq->flags &= ~__WQ_ORDERED;
4420 wq->saved_max_active = max_active;
4421
4422 for_each_pwq(pwq, wq)
4423 pwq_adjust_max_active(pwq);
4424
4425 mutex_unlock(&wq->mutex);
4426}
4427EXPORT_SYMBOL_GPL(workqueue_set_max_active);
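/*
 * Usage sketch (illustrative only): throttle a previously allocated,
 * non-ordered workqueue so that at most four of its work items execute
 * concurrently per pool.  @frob_wq is assumed to have been created with
 * alloc_workqueue().
 *
 *	workqueue_set_max_active(frob_wq, 4);
 */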
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437struct work_struct *current_work(void)
4438{
4439 struct worker *worker = current_wq_worker();
4440
4441 return worker ? worker->current_work : NULL;
4442}
4443EXPORT_SYMBOL(current_work);
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453bool current_is_workqueue_rescuer(void)
4454{
4455 struct worker *worker = current_wq_worker();
4456
4457 return worker && worker->rescue_wq;
4458}
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4479{
4480 struct pool_workqueue *pwq;
4481 bool ret;
4482
4483 rcu_read_lock();
4484 preempt_disable();
4485
4486 if (cpu == WORK_CPU_UNBOUND)
4487 cpu = smp_processor_id();
4488
4489 if (!(wq->flags & WQ_UNBOUND))
4490 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4491 else
4492 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4493
4494 ret = !list_empty(&pwq->delayed_works);
4495 preempt_enable();
4496 rcu_read_unlock();
4497
4498 return ret;
4499}
4500EXPORT_SYMBOL_GPL(workqueue_congested);
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513unsigned int work_busy(struct work_struct *work)
4514{
4515 struct worker_pool *pool;
4516 unsigned long flags;
4517 unsigned int ret = 0;
4518
4519 if (work_pending(work))
4520 ret |= WORK_BUSY_PENDING;
4521
4522 rcu_read_lock();
4523 pool = get_work_pool(work);
4524 if (pool) {
4525 spin_lock_irqsave(&pool->lock, flags);
4526 if (find_worker_executing_work(pool, work))
4527 ret |= WORK_BUSY_RUNNING;
4528 spin_unlock_irqrestore(&pool->lock, flags);
4529 }
4530 rcu_read_unlock();
4531
4532 return ret;
4533}
4534EXPORT_SYMBOL_GPL(work_busy);
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546void set_worker_desc(const char *fmt, ...)
4547{
4548 struct worker *worker = current_wq_worker();
4549 va_list args;
4550
4551 if (worker) {
4552 va_start(args, fmt);
4553 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4554 va_end(args);
4555 }
4556}
4557EXPORT_SYMBOL_GPL(set_worker_desc);
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572void print_worker_info(const char *log_lvl, struct task_struct *task)
4573{
4574 work_func_t *fn = NULL;
4575 char name[WQ_NAME_LEN] = { };
4576 char desc[WORKER_DESC_LEN] = { };
4577 struct pool_workqueue *pwq = NULL;
4578 struct workqueue_struct *wq = NULL;
4579 struct worker *worker;
4580
4581 if (!(task->flags & PF_WQ_WORKER))
4582 return;
4583
4584
4585
4586
4587
4588 worker = kthread_probe_data(task);
4589
4590
4591
4592
4593
4594 probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4595 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4596 probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4597 probe_kernel_read(name, wq->name, sizeof(name) - 1);
4598 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4599
4600 if (fn || name[0] || desc[0]) {
4601 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
4602 if (strcmp(name, desc))
4603 pr_cont(" (%s)", desc);
4604 pr_cont("\n");
4605 }
4606}
4607
4608static void pr_cont_pool_info(struct worker_pool *pool)
4609{
4610 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4611 if (pool->node != NUMA_NO_NODE)
4612 pr_cont(" node=%d", pool->node);
4613 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4614}
4615
4616static void pr_cont_work(bool comma, struct work_struct *work)
4617{
4618 if (work->func == wq_barrier_func) {
4619 struct wq_barrier *barr;
4620
4621 barr = container_of(work, struct wq_barrier, work);
4622
4623 pr_cont("%s BAR(%d)", comma ? "," : "",
4624 task_pid_nr(barr->task));
4625 } else {
4626 pr_cont("%s %ps", comma ? "," : "", work->func);
4627 }
4628}
4629
4630static void show_pwq(struct pool_workqueue *pwq)
4631{
4632 struct worker_pool *pool = pwq->pool;
4633 struct work_struct *work;
4634 struct worker *worker;
4635 bool has_in_flight = false, has_pending = false;
4636 int bkt;
4637
4638 pr_info(" pwq %d:", pool->id);
4639 pr_cont_pool_info(pool);
4640
4641 pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
4642 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4643
4644 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4645 if (worker->current_pwq == pwq) {
4646 has_in_flight = true;
4647 break;
4648 }
4649 }
4650 if (has_in_flight) {
4651 bool comma = false;
4652
4653 pr_info(" in-flight:");
4654 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4655 if (worker->current_pwq != pwq)
4656 continue;
4657
4658 pr_cont("%s %d%s:%ps", comma ? "," : "",
4659 task_pid_nr(worker->task),
4660 worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4661 worker->current_func);
4662 list_for_each_entry(work, &worker->scheduled, entry)
4663 pr_cont_work(false, work);
4664 comma = true;
4665 }
4666 pr_cont("\n");
4667 }
4668
4669 list_for_each_entry(work, &pool->worklist, entry) {
4670 if (get_work_pwq(work) == pwq) {
4671 has_pending = true;
4672 break;
4673 }
4674 }
4675 if (has_pending) {
4676 bool comma = false;
4677
4678 pr_info(" pending:");
4679 list_for_each_entry(work, &pool->worklist, entry) {
4680 if (get_work_pwq(work) != pwq)
4681 continue;
4682
4683 pr_cont_work(comma, work);
4684 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4685 }
4686 pr_cont("\n");
4687 }
4688
4689 if (!list_empty(&pwq->delayed_works)) {
4690 bool comma = false;
4691
4692 pr_info(" delayed:");
4693 list_for_each_entry(work, &pwq->delayed_works, entry) {
4694 pr_cont_work(comma, work);
4695 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4696 }
4697 pr_cont("\n");
4698 }
4699}
4700
4701
4702
4703
4704
4705
4706
4707void show_workqueue_state(void)
4708{
4709 struct workqueue_struct *wq;
4710 struct worker_pool *pool;
4711 unsigned long flags;
4712 int pi;
4713
4714 rcu_read_lock();
4715
4716 pr_info("Showing busy workqueues and worker pools:\n");
4717
4718 list_for_each_entry_rcu(wq, &workqueues, list) {
4719 struct pool_workqueue *pwq;
4720 bool idle = true;
4721
4722 for_each_pwq(pwq, wq) {
4723 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4724 idle = false;
4725 break;
4726 }
4727 }
4728 if (idle)
4729 continue;
4730
4731 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4732
4733 for_each_pwq(pwq, wq) {
4734 spin_lock_irqsave(&pwq->pool->lock, flags);
4735 if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4736 show_pwq(pwq);
4737 spin_unlock_irqrestore(&pwq->pool->lock, flags);
4738
4739
4740
4741
4742
4743 touch_nmi_watchdog();
4744 }
4745 }
4746
4747 for_each_pool(pool, pi) {
4748 struct worker *worker;
4749 bool first = true;
4750
4751 spin_lock_irqsave(&pool->lock, flags);
4752 if (pool->nr_workers == pool->nr_idle)
4753 goto next_pool;
4754
4755 pr_info("pool %d:", pool->id);
4756 pr_cont_pool_info(pool);
4757 pr_cont(" hung=%us workers=%d",
4758 jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4759 pool->nr_workers);
4760 if (pool->manager)
4761 pr_cont(" manager: %d",
4762 task_pid_nr(pool->manager->task));
4763 list_for_each_entry(worker, &pool->idle_list, entry) {
4764 pr_cont(" %s%d", first ? "idle: " : "",
4765 task_pid_nr(worker->task));
4766 first = false;
4767 }
4768 pr_cont("\n");
4769 next_pool:
4770 spin_unlock_irqrestore(&pool->lock, flags);
4771
4772
4773
4774
4775
4776 touch_nmi_watchdog();
4777 }
4778
4779 rcu_read_unlock();
4780}
4781
4782
4783void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
4784{
4785 int off;
4786
4787
4788 off = strscpy(buf, task->comm, size);
4789 if (off < 0)
4790 return;
4791
4792
4793 mutex_lock(&wq_pool_attach_mutex);
4794
4795 if (task->flags & PF_WQ_WORKER) {
4796 struct worker *worker = kthread_data(task);
4797 struct worker_pool *pool = worker->pool;
4798
4799 if (pool) {
4800 spin_lock_irq(&pool->lock);
4801
4802
4803
4804
4805
4806 if (worker->desc[0] != '\0') {
4807 if (worker->current_work)
4808 scnprintf(buf + off, size - off, "+%s",
4809 worker->desc);
4810 else
4811 scnprintf(buf + off, size - off, "-%s",
4812 worker->desc);
4813 }
4814 spin_unlock_irq(&pool->lock);
4815 }
4816 }
4817
4818 mutex_unlock(&wq_pool_attach_mutex);
4819}
4820
4821#ifdef CONFIG_SMP
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837
4838static void unbind_workers(int cpu)
4839{
4840 struct worker_pool *pool;
4841 struct worker *worker;
4842
4843 for_each_cpu_worker_pool(pool, cpu) {
4844 mutex_lock(&wq_pool_attach_mutex);
4845 spin_lock_irq(&pool->lock);
4846
4847
4848
4849
4850
4851
4852
4853
4854 for_each_pool_worker(worker, pool)
4855 worker->flags |= WORKER_UNBOUND;
4856
4857 pool->flags |= POOL_DISASSOCIATED;
4858
4859 spin_unlock_irq(&pool->lock);
4860 mutex_unlock(&wq_pool_attach_mutex);
4861
4862
4863
4864
4865
4866
4867
4868 schedule();
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878 atomic_set(&pool->nr_running, 0);
4879
4880
4881
4882
4883
4884
4885 spin_lock_irq(&pool->lock);
4886 wake_up_worker(pool);
4887 spin_unlock_irq(&pool->lock);
4888 }
4889}
4890
4891
4892
4893
4894
4895
4896
4897static void rebind_workers(struct worker_pool *pool)
4898{
4899 struct worker *worker;
4900
4901 lockdep_assert_held(&wq_pool_attach_mutex);
4902
4903
4904
4905
4906
4907
4908
4909
4910 for_each_pool_worker(worker, pool)
4911 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4912 pool->attrs->cpumask) < 0);
4913
4914 spin_lock_irq(&pool->lock);
4915
4916 pool->flags &= ~POOL_DISASSOCIATED;
4917
4918 for_each_pool_worker(worker, pool) {
4919 unsigned int worker_flags = worker->flags;
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929 if (worker_flags & WORKER_IDLE)
4930 wake_up_process(worker->task);
4931
4932
4933
4934
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4948 worker_flags |= WORKER_REBOUND;
4949 worker_flags &= ~WORKER_UNBOUND;
4950 WRITE_ONCE(worker->flags, worker_flags);
4951 }
4952
4953 spin_unlock_irq(&pool->lock);
4954}
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4967{
4968 static cpumask_t cpumask;
4969 struct worker *worker;
4970
4971 lockdep_assert_held(&wq_pool_attach_mutex);
4972
4973
4974 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4975 return;
4976
4977 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4978
4979
4980 for_each_pool_worker(worker, pool)
4981 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
4982}
4983
4984int workqueue_prepare_cpu(unsigned int cpu)
4985{
4986 struct worker_pool *pool;
4987
4988 for_each_cpu_worker_pool(pool, cpu) {
4989 if (pool->nr_workers)
4990 continue;
4991 if (!create_worker(pool))
4992 return -ENOMEM;
4993 }
4994 return 0;
4995}
4996
4997int workqueue_online_cpu(unsigned int cpu)
4998{
4999 struct worker_pool *pool;
5000 struct workqueue_struct *wq;
5001 int pi;
5002
5003 mutex_lock(&wq_pool_mutex);
5004
5005 for_each_pool(pool, pi) {
5006 mutex_lock(&wq_pool_attach_mutex);
5007
5008 if (pool->cpu == cpu)
5009 rebind_workers(pool);
5010 else if (pool->cpu < 0)
5011 restore_unbound_workers_cpumask(pool, cpu);
5012
5013 mutex_unlock(&wq_pool_attach_mutex);
5014 }
5015
5016
5017 list_for_each_entry(wq, &workqueues, list)
5018 wq_update_unbound_numa(wq, cpu, true);
5019
5020 mutex_unlock(&wq_pool_mutex);
5021 return 0;
5022}
5023
5024int workqueue_offline_cpu(unsigned int cpu)
5025{
5026 struct workqueue_struct *wq;
5027
5028
5029 if (WARN_ON(cpu != smp_processor_id()))
5030 return -1;
5031
5032 unbind_workers(cpu);
5033
5034
5035 mutex_lock(&wq_pool_mutex);
5036 list_for_each_entry(wq, &workqueues, list)
5037 wq_update_unbound_numa(wq, cpu, false);
5038 mutex_unlock(&wq_pool_mutex);
5039
5040 return 0;
5041}
5042
5043struct work_for_cpu {
5044 struct work_struct work;
5045 long (*fn)(void *);
5046 void *arg;
5047 long ret;
5048};
5049
5050static void work_for_cpu_fn(struct work_struct *work)
5051{
5052 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5053
5054 wfc->ret = wfc->fn(wfc->arg);
5055}
5056
/**
 * work_on_cpu - run a function in thread context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from
 * completing.
 *
 * Return: The value @fn returns.
 */
5068long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
5069{
5070 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5071
5072 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5073 schedule_work_on(cpu, &wfc.work);
5074 flush_work(&wfc.work);
5075 destroy_work_on_stack(&wfc.work);
5076 return wfc.ret;
5077}
5078EXPORT_SYMBOL_GPL(work_on_cpu);
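/*
 * Usage sketch (illustrative only; frob_read_local, frob_read_on and
 * frob_hw_read are hypothetical): read a CPU-local resource from the CPU
 * that owns it, in sleepable context.
 *
 *	static long frob_read_local(void *arg)
 *	{
 *		unsigned int *reg = arg;
 *
 *		return frob_hw_read(*reg);	// hypothetical helper
 *	}
 *
 *	static long frob_read_on(int cpu, unsigned int reg)
 *	{
 *		// use work_on_cpu_safe() instead if @cpu may go offline
 *		return work_on_cpu(cpu, frob_read_local, &reg);
 *	}
 */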
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
5092{
5093 long ret = -ENODEV;
5094
5095 get_online_cpus();
5096 if (cpu_online(cpu))
5097 ret = work_on_cpu(cpu, fn, arg);
5098 put_online_cpus();
5099 return ret;
5100}
5101EXPORT_SYMBOL_GPL(work_on_cpu_safe);
5102#endif
5103
5104#ifdef CONFIG_FREEZER
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116void freeze_workqueues_begin(void)
5117{
5118 struct workqueue_struct *wq;
5119 struct pool_workqueue *pwq;
5120
5121 mutex_lock(&wq_pool_mutex);
5122
5123 WARN_ON_ONCE(workqueue_freezing);
5124 workqueue_freezing = true;
5125
5126 list_for_each_entry(wq, &workqueues, list) {
5127 mutex_lock(&wq->mutex);
5128 for_each_pwq(pwq, wq)
5129 pwq_adjust_max_active(pwq);
5130 mutex_unlock(&wq->mutex);
5131 }
5132
5133 mutex_unlock(&wq_pool_mutex);
5134}
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149bool freeze_workqueues_busy(void)
5150{
5151 bool busy = false;
5152 struct workqueue_struct *wq;
5153 struct pool_workqueue *pwq;
5154
5155 mutex_lock(&wq_pool_mutex);
5156
5157 WARN_ON_ONCE(!workqueue_freezing);
5158
5159 list_for_each_entry(wq, &workqueues, list) {
5160 if (!(wq->flags & WQ_FREEZABLE))
5161 continue;
5162
5163
5164
5165
5166 rcu_read_lock();
5167 for_each_pwq(pwq, wq) {
5168 WARN_ON_ONCE(pwq->nr_active < 0);
5169 if (pwq->nr_active) {
5170 busy = true;
5171 rcu_read_unlock();
5172 goto out_unlock;
5173 }
5174 }
5175 rcu_read_unlock();
5176 }
5177out_unlock:
5178 mutex_unlock(&wq_pool_mutex);
5179 return busy;
5180}
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191void thaw_workqueues(void)
5192{
5193 struct workqueue_struct *wq;
5194 struct pool_workqueue *pwq;
5195
5196 mutex_lock(&wq_pool_mutex);
5197
5198 if (!workqueue_freezing)
5199 goto out_unlock;
5200
5201 workqueue_freezing = false;
5202
5203
5204 list_for_each_entry(wq, &workqueues, list) {
5205 mutex_lock(&wq->mutex);
5206 for_each_pwq(pwq, wq)
5207 pwq_adjust_max_active(pwq);
5208 mutex_unlock(&wq->mutex);
5209 }
5210
5211out_unlock:
5212 mutex_unlock(&wq_pool_mutex);
5213}
5214#endif
5215
5216static int workqueue_apply_unbound_cpumask(void)
5217{
5218 LIST_HEAD(ctxs);
5219 int ret = 0;
5220 struct workqueue_struct *wq;
5221 struct apply_wqattrs_ctx *ctx, *n;
5222
5223 lockdep_assert_held(&wq_pool_mutex);
5224
5225 list_for_each_entry(wq, &workqueues, list) {
5226 if (!(wq->flags & WQ_UNBOUND))
5227 continue;
5228
5229 if (wq->flags & __WQ_ORDERED)
5230 continue;
5231
5232 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
5233 if (!ctx) {
5234 ret = -ENOMEM;
5235 break;
5236 }
5237
5238 list_add_tail(&ctx->list, &ctxs);
5239 }
5240
5241 list_for_each_entry_safe(ctx, n, &ctxs, list) {
5242 if (!ret)
5243 apply_wqattrs_commit(ctx);
5244 apply_wqattrs_cleanup(ctx);
5245 }
5246
5247 return ret;
5248}
5249
/**
 * workqueue_set_unbound_cpumask - set the low-level unbound cpumask
 * @cpumask: the cpumask to set
 *
 * The low-level workqueues cpumask is a global cpumask that limits the
 * affinity of all unbound workqueues.  This function checks @cpumask,
 * applies it to all unbound workqueues, and updates all of their pwqs.
 * On failure the previous cpumask is restored.
 *
 * Return: 0 on success, -EINVAL if @cpumask doesn't intersect the
 * possible CPUs, -ENOMEM if pwq allocation fails.
 */
5262int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5263{
5264 int ret = -EINVAL;
5265 cpumask_var_t saved_cpumask;
5266
5267 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
5268 return -ENOMEM;
5269
5270
5271
5272
5273
5274 cpumask_and(cpumask, cpumask, cpu_possible_mask);
5275 if (!cpumask_empty(cpumask)) {
5276 apply_wqattrs_lock();
5277
5278
5279 cpumask_copy(saved_cpumask, wq_unbound_cpumask);
5280
5281
5282 cpumask_copy(wq_unbound_cpumask, cpumask);
5283 ret = workqueue_apply_unbound_cpumask();
5284
5285
5286 if (ret < 0)
5287 cpumask_copy(wq_unbound_cpumask, saved_cpumask);
5288
5289 apply_wqattrs_unlock();
5290 }
5291
5292 free_cpumask_var(saved_cpumask);
5293 return ret;
5294}
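/*
 * Usage note (illustrative only): the usual way to change this mask is
 * through the sysfs attribute implemented below rather than by calling
 * the function directly, e.g.
 *
 *	# keep unbound workqueue workers off CPUs 0-1
 *	echo 2-7 > /sys/devices/virtual/workqueue/cpumask
 */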
5295
5296#ifdef CONFIG_SYSFS
5297
5298
5299
5300
5301
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311
5312struct wq_device {
5313 struct workqueue_struct *wq;
5314 struct device dev;
5315};
5316
5317static struct workqueue_struct *dev_to_wq(struct device *dev)
5318{
5319 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5320
5321 return wq_dev->wq;
5322}
5323
5324static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5325 char *buf)
5326{
5327 struct workqueue_struct *wq = dev_to_wq(dev);
5328
5329 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5330}
5331static DEVICE_ATTR_RO(per_cpu);
5332
5333static ssize_t max_active_show(struct device *dev,
5334 struct device_attribute *attr, char *buf)
5335{
5336 struct workqueue_struct *wq = dev_to_wq(dev);
5337
5338 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5339}
5340
5341static ssize_t max_active_store(struct device *dev,
5342 struct device_attribute *attr, const char *buf,
5343 size_t count)
5344{
5345 struct workqueue_struct *wq = dev_to_wq(dev);
5346 int val;
5347
5348 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5349 return -EINVAL;
5350
5351 workqueue_set_max_active(wq, val);
5352 return count;
5353}
5354static DEVICE_ATTR_RW(max_active);
5355
5356static struct attribute *wq_sysfs_attrs[] = {
5357 &dev_attr_per_cpu.attr,
5358 &dev_attr_max_active.attr,
5359 NULL,
5360};
5361ATTRIBUTE_GROUPS(wq_sysfs);
5362
5363static ssize_t wq_pool_ids_show(struct device *dev,
5364 struct device_attribute *attr, char *buf)
5365{
5366 struct workqueue_struct *wq = dev_to_wq(dev);
5367 const char *delim = "";
5368 int node, written = 0;
5369
5370 get_online_cpus();
5371 rcu_read_lock();
5372 for_each_node(node) {
5373 written += scnprintf(buf + written, PAGE_SIZE - written,
5374 "%s%d:%d", delim, node,
5375 unbound_pwq_by_node(wq, node)->pool->id);
5376 delim = " ";
5377 }
5378 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5379 rcu_read_unlock();
5380 put_online_cpus();
5381
5382 return written;
5383}
5384
5385static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5386 char *buf)
5387{
5388 struct workqueue_struct *wq = dev_to_wq(dev);
5389 int written;
5390
5391 mutex_lock(&wq->mutex);
5392 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5393 mutex_unlock(&wq->mutex);
5394
5395 return written;
5396}
5397
5398
5399static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5400{
5401 struct workqueue_attrs *attrs;
5402
5403 lockdep_assert_held(&wq_pool_mutex);
5404
5405 attrs = alloc_workqueue_attrs();
5406 if (!attrs)
5407 return NULL;
5408
5409 copy_workqueue_attrs(attrs, wq->unbound_attrs);
5410 return attrs;
5411}
5412
5413static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5414 const char *buf, size_t count)
5415{
5416 struct workqueue_struct *wq = dev_to_wq(dev);
5417 struct workqueue_attrs *attrs;
5418 int ret = -ENOMEM;
5419
5420 apply_wqattrs_lock();
5421
5422 attrs = wq_sysfs_prep_attrs(wq);
5423 if (!attrs)
5424 goto out_unlock;
5425
5426 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5427 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5428 ret = apply_workqueue_attrs_locked(wq, attrs);
5429 else
5430 ret = -EINVAL;
5431
5432out_unlock:
5433 apply_wqattrs_unlock();
5434 free_workqueue_attrs(attrs);
5435 return ret ?: count;
5436}
5437
5438static ssize_t wq_cpumask_show(struct device *dev,
5439 struct device_attribute *attr, char *buf)
5440{
5441 struct workqueue_struct *wq = dev_to_wq(dev);
5442 int written;
5443
5444 mutex_lock(&wq->mutex);
5445 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5446 cpumask_pr_args(wq->unbound_attrs->cpumask));
5447 mutex_unlock(&wq->mutex);
5448 return written;
5449}
5450
5451static ssize_t wq_cpumask_store(struct device *dev,
5452 struct device_attribute *attr,
5453 const char *buf, size_t count)
5454{
5455 struct workqueue_struct *wq = dev_to_wq(dev);
5456 struct workqueue_attrs *attrs;
5457 int ret = -ENOMEM;
5458
5459 apply_wqattrs_lock();
5460
5461 attrs = wq_sysfs_prep_attrs(wq);
5462 if (!attrs)
5463 goto out_unlock;
5464
5465 ret = cpumask_parse(buf, attrs->cpumask);
5466 if (!ret)
5467 ret = apply_workqueue_attrs_locked(wq, attrs);
5468
5469out_unlock:
5470 apply_wqattrs_unlock();
5471 free_workqueue_attrs(attrs);
5472 return ret ?: count;
5473}
5474
5475static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5476 char *buf)
5477{
5478 struct workqueue_struct *wq = dev_to_wq(dev);
5479 int written;
5480
5481 mutex_lock(&wq->mutex);
5482 written = scnprintf(buf, PAGE_SIZE, "%d\n",
5483 !wq->unbound_attrs->no_numa);
5484 mutex_unlock(&wq->mutex);
5485
5486 return written;
5487}
5488
5489static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5490 const char *buf, size_t count)
5491{
5492 struct workqueue_struct *wq = dev_to_wq(dev);
5493 struct workqueue_attrs *attrs;
5494 int v, ret = -ENOMEM;
5495
5496 apply_wqattrs_lock();
5497
5498 attrs = wq_sysfs_prep_attrs(wq);
5499 if (!attrs)
5500 goto out_unlock;
5501
5502 ret = -EINVAL;
5503 if (sscanf(buf, "%d", &v) == 1) {
5504 attrs->no_numa = !v;
5505 ret = apply_workqueue_attrs_locked(wq, attrs);
5506 }
5507
5508out_unlock:
5509 apply_wqattrs_unlock();
5510 free_workqueue_attrs(attrs);
5511 return ret ?: count;
5512}
5513
5514static struct device_attribute wq_sysfs_unbound_attrs[] = {
5515 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5516 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5517 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5518 __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5519 __ATTR_NULL,
5520};
5521
5522static struct bus_type wq_subsys = {
5523 .name = "workqueue",
5524 .dev_groups = wq_sysfs_groups,
5525};
5526
5527static ssize_t wq_unbound_cpumask_show(struct device *dev,
5528 struct device_attribute *attr, char *buf)
5529{
5530 int written;
5531
5532 mutex_lock(&wq_pool_mutex);
5533 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5534 cpumask_pr_args(wq_unbound_cpumask));
5535 mutex_unlock(&wq_pool_mutex);
5536
5537 return written;
5538}
5539
5540static ssize_t wq_unbound_cpumask_store(struct device *dev,
5541 struct device_attribute *attr, const char *buf, size_t count)
5542{
5543 cpumask_var_t cpumask;
5544 int ret;
5545
5546 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5547 return -ENOMEM;
5548
5549 ret = cpumask_parse(buf, cpumask);
5550 if (!ret)
5551 ret = workqueue_set_unbound_cpumask(cpumask);
5552
5553 free_cpumask_var(cpumask);
5554 return ret ? ret : count;
5555}
5556
5557static struct device_attribute wq_sysfs_cpumask_attr =
5558 __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5559 wq_unbound_cpumask_store);
5560
5561static int __init wq_sysfs_init(void)
5562{
5563 int err;
5564
5565 err = subsys_virtual_register(&wq_subsys, NULL);
5566 if (err)
5567 return err;
5568
5569 return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5570}
5571core_initcall(wq_sysfs_init);
5572
5573static void wq_device_release(struct device *dev)
5574{
5575 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5576
5577 kfree(wq_dev);
5578}
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
5591
5592
5593
5594
5595int workqueue_sysfs_register(struct workqueue_struct *wq)
5596{
5597 struct wq_device *wq_dev;
5598 int ret;
5599
5600
5601
5602
5603
5604
5605 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5606 return -EINVAL;
5607
5608 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5609 if (!wq_dev)
5610 return -ENOMEM;
5611
5612 wq_dev->wq = wq;
5613 wq_dev->dev.bus = &wq_subsys;
5614 wq_dev->dev.release = wq_device_release;
5615 dev_set_name(&wq_dev->dev, "%s", wq->name);
5616
5617
5618
5619
5620
5621 dev_set_uevent_suppress(&wq_dev->dev, true);
5622
5623 ret = device_register(&wq_dev->dev);
5624 if (ret) {
5625 put_device(&wq_dev->dev);
5626 wq->wq_dev = NULL;
5627 return ret;
5628 }
5629
5630 if (wq->flags & WQ_UNBOUND) {
5631 struct device_attribute *attr;
5632
5633 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5634 ret = device_create_file(&wq_dev->dev, attr);
5635 if (ret) {
5636 device_unregister(&wq_dev->dev);
5637 wq->wq_dev = NULL;
5638 return ret;
5639 }
5640 }
5641 }
5642
5643 dev_set_uevent_suppress(&wq_dev->dev, false);
5644 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5645 return 0;
5646}
5647
5648
5649
5650
5651
5652
5653
5654static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5655{
5656 struct wq_device *wq_dev = wq->wq_dev;
5657
5658 if (!wq->wq_dev)
5659 return;
5660
5661 wq->wq_dev = NULL;
5662 device_unregister(&wq_dev->dev);
5663}
5664#else
5665static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
5666#endif
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685#ifdef CONFIG_WQ_WATCHDOG
5686
5687static unsigned long wq_watchdog_thresh = 30;
5688static struct timer_list wq_watchdog_timer;
5689
5690static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5691static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5692
5693static void wq_watchdog_reset_touched(void)
5694{
5695 int cpu;
5696
5697 wq_watchdog_touched = jiffies;
5698 for_each_possible_cpu(cpu)
5699 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5700}
5701
5702static void wq_watchdog_timer_fn(struct timer_list *unused)
5703{
5704 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5705 bool lockup_detected = false;
5706 struct worker_pool *pool;
5707 int pi;
5708
5709 if (!thresh)
5710 return;
5711
5712 rcu_read_lock();
5713
5714 for_each_pool(pool, pi) {
5715 unsigned long pool_ts, touched, ts;
5716
5717 if (list_empty(&pool->worklist))
5718 continue;
5719
5720
5721 pool_ts = READ_ONCE(pool->watchdog_ts);
5722 touched = READ_ONCE(wq_watchdog_touched);
5723
5724 if (time_after(pool_ts, touched))
5725 ts = pool_ts;
5726 else
5727 ts = touched;
5728
5729 if (pool->cpu >= 0) {
5730 unsigned long cpu_touched =
5731 READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5732 pool->cpu));
5733 if (time_after(cpu_touched, ts))
5734 ts = cpu_touched;
5735 }
5736
5737
5738 if (time_after(jiffies, ts + thresh)) {
5739 lockup_detected = true;
5740 pr_emerg("BUG: workqueue lockup - pool");
5741 pr_cont_pool_info(pool);
5742 pr_cont(" stuck for %us!\n",
5743 jiffies_to_msecs(jiffies - pool_ts) / 1000);
5744 }
5745 }
5746
5747 rcu_read_unlock();
5748
5749 if (lockup_detected)
5750 show_workqueue_state();
5751
5752 wq_watchdog_reset_touched();
5753 mod_timer(&wq_watchdog_timer, jiffies + thresh);
5754}
5755
5756notrace void wq_watchdog_touch(int cpu)
5757{
5758 if (cpu >= 0)
5759 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5760 else
5761 wq_watchdog_touched = jiffies;
5762}
5763
5764static void wq_watchdog_set_thresh(unsigned long thresh)
5765{
5766 wq_watchdog_thresh = 0;
5767 del_timer_sync(&wq_watchdog_timer);
5768
5769 if (thresh) {
5770 wq_watchdog_thresh = thresh;
5771 wq_watchdog_reset_touched();
5772 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5773 }
5774}
5775
5776static int wq_watchdog_param_set_thresh(const char *val,
5777 const struct kernel_param *kp)
5778{
5779 unsigned long thresh;
5780 int ret;
5781
5782 ret = kstrtoul(val, 0, &thresh);
5783 if (ret)
5784 return ret;
5785
5786 if (system_wq)
5787 wq_watchdog_set_thresh(thresh);
5788 else
5789 wq_watchdog_thresh = thresh;
5790
5791 return 0;
5792}
5793
5794static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5795 .set = wq_watchdog_param_set_thresh,
5796 .get = param_get_ulong,
5797};
5798
5799module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5800 0644);
5801
5802static void wq_watchdog_init(void)
5803{
5804 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
5805 wq_watchdog_set_thresh(wq_watchdog_thresh);
5806}
5807
5808#else
5809
5810static inline void wq_watchdog_init(void) { }
5811
5812#endif
5813
5814static void __init wq_numa_init(void)
5815{
5816 cpumask_var_t *tbl;
5817 int node, cpu;
5818
5819 if (num_possible_nodes() <= 1)
5820 return;
5821
5822 if (wq_disable_numa) {
5823 pr_info("workqueue: NUMA affinity support disabled\n");
5824 return;
5825 }
5826
5827 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
5828 BUG_ON(!wq_update_unbound_numa_attrs_buf);
5829
5830
5831
5832
5833
5834
5835 tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
5836 BUG_ON(!tbl);
5837
5838 for_each_node(node)
5839 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5840 node_online(node) ? node : NUMA_NO_NODE));
5841
5842 for_each_possible_cpu(cpu) {
5843 node = cpu_to_node(cpu);
5844 if (WARN_ON(node == NUMA_NO_NODE)) {
5845 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5846
5847 return;
5848 }
5849 cpumask_set_cpu(cpu, tbl[node]);
5850 }
5851
5852 wq_numa_possible_cpumask = tbl;
5853 wq_numa_enabled = true;
5854}
5855
5856
5857
5858
5859
5860
5861
5862
5863
5864
5865
5866int __init workqueue_init_early(void)
5867{
5868 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5869 int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
5870 int i, cpu;
5871
5872 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5873
5874 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5875 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
5876
5877 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5878
5879
5880 for_each_possible_cpu(cpu) {
5881 struct worker_pool *pool;
5882
5883 i = 0;
5884 for_each_cpu_worker_pool(pool, cpu) {
5885 BUG_ON(init_worker_pool(pool));
5886 pool->cpu = cpu;
5887 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5888 pool->attrs->nice = std_nice[i++];
5889 pool->node = cpu_to_node(cpu);
5890
5891
5892 mutex_lock(&wq_pool_mutex);
5893 BUG_ON(worker_pool_assign_id(pool));
5894 mutex_unlock(&wq_pool_mutex);
5895 }
5896 }
5897
5898
5899 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5900 struct workqueue_attrs *attrs;
5901
5902 BUG_ON(!(attrs = alloc_workqueue_attrs()));
5903 attrs->nice = std_nice[i];
5904 unbound_std_wq_attrs[i] = attrs;
5905
5906
5907
5908
5909
5910
5911 BUG_ON(!(attrs = alloc_workqueue_attrs()));
5912 attrs->nice = std_nice[i];
5913 attrs->no_numa = true;
5914 ordered_wq_attrs[i] = attrs;
5915 }
5916
5917 system_wq = alloc_workqueue("events", 0, 0);
5918 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5919 system_long_wq = alloc_workqueue("events_long", 0, 0);
5920 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5921 WQ_UNBOUND_MAX_ACTIVE);
5922 system_freezable_wq = alloc_workqueue("events_freezable",
5923 WQ_FREEZABLE, 0);
5924 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5925 WQ_POWER_EFFICIENT, 0);
5926 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5927 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5928 0);
5929 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5930 !system_unbound_wq || !system_freezable_wq ||
5931 !system_power_efficient_wq ||
5932 !system_freezable_power_efficient_wq);
5933
5934 return 0;
5935}
5936
5937
5938
5939
5940
5941
5942
5943
5944
5945
5946int __init workqueue_init(void)
5947{
5948 struct workqueue_struct *wq;
5949 struct worker_pool *pool;
5950 int cpu, bkt;
5951
5952
5953
5954
5955
5956
5957
5958
5959
5960
5961 wq_numa_init();
5962
5963 mutex_lock(&wq_pool_mutex);
5964
5965 for_each_possible_cpu(cpu) {
5966 for_each_cpu_worker_pool(pool, cpu) {
5967 pool->node = cpu_to_node(cpu);
5968 }
5969 }
5970
5971 list_for_each_entry(wq, &workqueues, list) {
5972 wq_update_unbound_numa(wq, smp_processor_id(), true);
5973 WARN(init_rescuer(wq),
5974 "workqueue: failed to create early rescuer for %s",
5975 wq->name);
5976 }
5977
5978 mutex_unlock(&wq_pool_mutex);
5979
5980
5981 for_each_online_cpu(cpu) {
5982 for_each_cpu_worker_pool(pool, cpu) {
5983 pool->flags &= ~POOL_DISASSOCIATED;
5984 BUG_ON(!create_worker(pool));
5985 }
5986 }
5987
5988 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
5989 BUG_ON(!create_worker(pool));
5990
5991 wq_online = true;
5992 wq_watchdog_init();
5993
5994 return 0;
5995}
5996