/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT= 1,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve bits of the work data for flags; with debugobjects
	 * turned off, 8 flag bits are reserved, which keeps pwq pointers
	 * 256-byte aligned and leaves room for 16 flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, the high bits of its data
	 * encode the pool it was last on.  Cap the pool ID at 31 bits
	 * and use the highest possible value to indicate that no pool
	 * is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit masks for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};
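
/*
 * Example (illustrative sketch, not part of this header): interpreting
 * the WORK_BUSY_* bits returned by work_busy(), which is declared
 * further below.  "dev" is a hypothetical driver-private object.
 *
 *	unsigned int busy = work_busy(&dev->work);
 *
 *	if (busy & WORK_BUSY_PENDING)
 *		pr_debug("work is pending\n");
 *	if (busy & WORK_BUSY_RUNNING)
 *		pr_debug("work is running\n");
 */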

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a
	 * worker_pool.  It only modifies how apply_workqueue_attrs()
	 * selects pools and thus doesn't participate in pool hash
	 * calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};
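
/*
 * Example (illustrative sketch, not part of this header): recovering
 * the container of a delayed_work from inside its work function.
 * "my_dev" and "my_timeout_fn" are hypothetical names.
 *
 *	struct my_dev {
 *		struct delayed_work dwork;
 *	};
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_dev *dev = container_of(dwork, struct my_dev, dwork);
 *	}
 */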

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting the _key here
 * is required, otherwise the work_struct would get initialized with
 * the copy of the lockdep_map's key!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
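
/*
 * Example (illustrative sketch, not part of this header): statically
 * declaring a work item and scheduling it on the kernel-global
 * workqueue.  "my_work" and "my_work_fn" are hypothetical names;
 * schedule_work() is declared further below.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);
 */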

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
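
/*
 * Example (illustrative sketch, not part of this header): runtime
 * initialization of work items embedded in a dynamically allocated
 * object.  "my_dev", "my_work_fn" and "my_timeout_fn" are
 * hypothetical names.
 *
 *	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	INIT_WORK(&dev->work, my_work_fn);
 *	INIT_DELAYED_WORK(&dev->dwork, my_timeout_fn);
 */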

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is
 * currently pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  However, a
	 * per-cpu work item scheduled on an otherwise idle CPU forces the
	 * scheduler to break that CPU's idleness, which hurts power
	 * consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel
	 * parameter is specified, trading cache locality for noticeable
	 * power savings.  Per-cpu workqueues identified as contributing
	 * significantly to power consumption are marked with this flag.
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* default upper bound on in-flight work items */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and are
 * converted into WQ_UNBOUND variants if the workqueue.power_efficient
 * kernel parameter is enabled; otherwise, they behave the same as
 * their non-power-efficient counterparts.  See WQ_POWER_EFFICIENT for
 * more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
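
/*
 * Example (illustrative sketch, not part of this header): allocating
 * a dedicated workqueue at driver init and destroying it on exit.
 * "my_wq" is a hypothetical name; destroy_workqueue() drains any
 * remaining work items first.
 *
 *	static struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &dev->work);
 *
 *	destroy_workqueue(my_wq);
 */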

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
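
/*
 * Example (illustrative sketch, not part of this header): typical
 * teardown ordering before freeing an object that embeds work items.
 * The _sync variants only return once the work item is no longer
 * queued or running.  "dev" is hypothetical.
 *
 *	cancel_delayed_work_sync(&dev->dwork);
 *	cancel_work_sync(&dev->work);
 *	kfree(dev);
 */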

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
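
/*
 * Example (illustrative sketch, not part of this header): using
 * mod_delayed_work() to debounce bursty events so the handler runs
 * once, 100ms after the most recent event.  "dev" is hypothetical.
 *
 *	mod_delayed_work(system_wq, &dev->dwork, msecs_to_jiffies(100));
 */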

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible.  Flushing a system-wide workqueue waits for work items
 * queued by unrelated users and is deprecated; flush or drain a
 * dedicated workqueue instead.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * It's very easy to get into trouble if you don't take great care.
 * Either of the following situations will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 *
 * Please stop calling this function!  A conversion to stop flushing
 * system-wide workqueues is in progress.  This function will be removed
 * after all in-tree users have been converted.
 */
#define flush_scheduled_work()						\
({									\
	if (0)								\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(system_wq);					\
})

/*
 * Warn at compile time, where detectable, if a caller flushes one of
 * the system-wide workqueues.  See the comment above
 * flush_scheduled_work() for why this is deprecated.
 */
#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
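
/*
 * Example (illustrative sketch, not part of this header): schedule a
 * hypothetical "dev->dwork" on the kernel-global workqueue two
 * seconds from now.
 *
 *	schedule_delayed_work(&dev->dwork, 2 * HZ);
 */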

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif
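
/*
 * Example (illustrative sketch, not part of this header): run a
 * function synchronously on a specific CPU; "read_regs_fn" is a
 * hypothetical callback that must execute on CPU 1.
 *
 *	long ret = work_on_cpu(1, read_regs_fn, NULL);
 */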

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else
static inline void wq_watchdog_touch(int cpu) { }
#endif

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);

#endif	/* _LINUX_WORKQUEUE_H */