1
2
3
4
5
6#ifndef _LINUX_WORKQUEUE_H
7#define _LINUX_WORKQUEUE_H
8
9#include <linux/timer.h>
10#include <linux/linkage.h>
11#include <linux/bitops.h>
12#include <linux/lockdep.h>
13#include <linux/threads.h>
14#include <linux/atomic.h>
15#include <linux/cpumask.h>
16#include <linux/rcupdate.h>
17
18struct workqueue_struct;
19
20struct work_struct;
21typedef void (*work_func_t)(struct work_struct *work);
22void delayed_work_timer_fn(struct timer_list *t);
23
24
25
26
27
28#define work_data_bits(work) ((unsigned long *)(&(work)->data))
29
/*
 * Layout of work_struct->data: the low bits are flag bits; while queued,
 * the remaining bits point to the pool_workqueue (WORK_STRUCT_PWQ set);
 * while off-queue they record the id of the last pool the item ran on.
 */
enum {
	WORK_STRUCT_PENDING_BIT = 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT = 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT = 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT = 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT = 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT = 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT = 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS = 4,

	WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC = 0,
#endif

	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR = WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND = NR_CPUS,

	/* total number of flag bits below the data pointer / pool id */
	WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
	 WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits record the last
	 * pool it was on.  Cap pool id bits at 31 so the id fits an int;
	 * the all-ones value means no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS = 1,
	WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING = 1 << 0,
	WORK_BUSY_RUNNING = 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN = 24,
};
101
/* a deferred-execution work item; execution context is a worker thread */
struct work_struct {
	atomic_long_t data;		/* flag bits + pwq pointer or pool id */
	struct list_head entry;		/* node on the queue's work list */
	work_func_t func;		/* callback invoked by the worker */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;	/* tracks flush vs. execution deadlocks */
#endif
};
110
/* initial ->data value: no pool associated, all flag bits clear */
#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
/* same, plus the static-initializer marker used by work debugobjects */
#define WORK_DATA_STATIC_INIT() \
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))
114
/* a work item queued after a timer expires; see queue_delayed_work_on() */
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;	/* fires delayed_work_timer_fn */

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};
123
/* a work item queued after an RCU grace period; see queue_rcu_work() */
struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};
131
132
133
134
135
136
/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike the other fields, ``no_numa`` only modifies how
	 * apply_workqueue_attrs() selects pools and thus doesn't
	 * participate in pool hash calculations or equality comparisons.
	 */
	bool no_numa;
};
157
/**
 * to_delayed_work - convert a work_struct to its containing delayed_work
 * @work: the &work_struct embedded in a &struct delayed_work
 *
 * Only valid when @work really is the ->work member of a delayed_work.
 */
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
162
/**
 * to_rcu_work - convert a work_struct to its containing rcu_work
 * @work: the &work_struct embedded in a &struct rcu_work
 *
 * Only valid when @work really is the ->work member of an rcu_work.
 */
static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
167
/* wrapper used by execute_in_process_context() */
struct execute_work {
	struct work_struct work;
};
171
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting the _key here is
 * required, otherwise it could get initialised to the copy of the
 * lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

/* static initializer for a work_struct; pair with DECLARE_WORK() */
#define __WORK_INITIALIZER(n, f) { \
	.data = WORK_DATA_STATIC_INIT(), \
	.entry = { &(n).entry, &(n).entry }, \
	.func = (f), \
	__WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
	}

/* static initializer for a delayed_work; the timer is marked IRQ-safe */
#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
	.work = __WORK_INITIALIZER((n).work, (f)), \
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
				     (tflags) | TIMER_IRQSAFE), \
	}

#define DECLARE_WORK(n, f) \
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f) \
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

/* like DECLARE_DELAYED_WORK() but with a deferrable timer */
#define DECLARE_DEFERRABLE_WORK(n, f) \
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
205
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
/* non-zero iff @work was statically initialized (WORK_STRUCT_STATIC set) */
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
/* debugobjects disabled: all tracking hooks compile away to no-ops */
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
220
221
222
223
224
225
226
227
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_work((_work), _onstack); \
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry); \
		(_work)->func = (_func); \
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack) \
	do { \
		__init_work((_work), _onstack); \
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		INIT_LIST_HEAD(&(_work)->entry); \
		(_work)->func = (_func); \
	} while (0)
#endif

#define INIT_WORK(_work, _func) \
	__INIT_WORK((_work), (_func), 0)

/* for on-stack work items; pair with destroy_work_on_stack() */
#define INIT_WORK_ONSTACK(_work, _func) \
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
	do { \
		INIT_WORK(&(_work)->work, (_func)); \
		__init_timer(&(_work)->timer, \
			     delayed_work_timer_fn, \
			     (_tflags) | TIMER_IRQSAFE); \
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
	do { \
		INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
		__init_timer_on_stack(&(_work)->timer, \
				      delayed_work_timer_fn, \
				      (_tflags) | TIMER_IRQSAFE); \
	} while (0)

#define INIT_DELAYED_WORK(_work, _func) \
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

/* deferrable variants: the backing timer is TIMER_DEFERRABLE */
#define INIT_DEFERRABLE_WORK(_work, _func) \
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func) \
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func) \
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
288
289
290
291
292
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
303
304
305
306
307
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND = 1 << 1,		/* not bound to any cpu */
	WQ_FREEZABLE = 1 << 2,		/* freeze during suspend */
	WQ_MEM_RECLAIM = 1 << 3,	/* may be used for memory reclaim */
	WQ_HIGHPRI = 1 << 4,		/* high priority */
	WQ_CPU_INTENSIVE = 1 << 5,	/* cpu intensive workqueue */
	WQ_SYSFS = 1 << 6,		/* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues which are identified to
	 * contribute significantly to power-consumption are identified and
	 * marked with this flag and enabling the power_efficient mode
	 * leads to noticeable power saving at the cost of small
	 * performance disadvantage.
	 */
	WQ_POWER_EFFICIENT = 1 << 7,

	__WQ_DRAINING = 1 << 16,	/* internal: workqueue is draining */
	__WQ_ORDERED = 1 << 17,		/* internal: workqueue is ordered */
	__WQ_LEGACY = 1 << 18,		/* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE = 512,		/* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU = 4,	/* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
352
353
/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE \
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/* internal backend of alloc_workqueue(); don't call directly */
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args...: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * The __lock_name macro dance is to guarantee that single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...) \
({ \
	static struct lock_class_key __key; \
	const char *__lock_name; \
	\
	__lock_name = "(wq_completion)"#fmt#args; \
	\
	__alloc_workqueue_key((fmt), (flags), (max_active), \
			      &__key, __lock_name, ##args); \
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...) \
	__alloc_workqueue_key((fmt), (flags), (max_active), \
			      NULL, NULL, ##args)
#endif
430
431
432
433
434
435
436
437
438
439
440
441
442
443
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...) \
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

/* legacy interfaces; prefer alloc_workqueue() in new code */
#define create_workqueue(name) \
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name) \
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name) \
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
455
456extern void destroy_workqueue(struct workqueue_struct *wq);
457
458struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
459void free_workqueue_attrs(struct workqueue_attrs *attrs);
460int apply_workqueue_attrs(struct workqueue_struct *wq,
461 const struct workqueue_attrs *attrs);
462int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
463
464extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
465 struct work_struct *work);
466extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
467 struct delayed_work *work, unsigned long delay);
468extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
469 struct delayed_work *dwork, unsigned long delay);
470extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
471
472extern void flush_workqueue(struct workqueue_struct *wq);
473extern void drain_workqueue(struct workqueue_struct *wq);
474
475extern int schedule_on_each_cpu(work_func_t func);
476
477int execute_in_process_context(work_func_t fn, struct execute_work *);
478
479extern bool flush_work(struct work_struct *work);
480extern bool cancel_work_sync(struct work_struct *work);
481
482extern bool flush_delayed_work(struct delayed_work *dwork);
483extern bool cancel_delayed_work(struct delayed_work *dwork);
484extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
485
486extern bool flush_rcu_work(struct rcu_work *rwork);
487
488extern void workqueue_set_max_active(struct workqueue_struct *wq,
489 int max_active);
490extern struct work_struct *current_work(void);
491extern bool current_is_workqueue_rescuer(void);
492extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
493extern unsigned int work_busy(struct work_struct *work);
494extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
495extern void print_worker_info(const char *log_lvl, struct task_struct *task);
496extern void show_workqueue_state(void);
497
498
499
500
501
502
503
504
505
506
507
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
	struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
513
514
515
516
517
518
519
520
521
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
	struct delayed_work *dwork,
	unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
528
529
530
531
532
533
534
535
536
/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
	struct delayed_work *dwork,
	unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
543
544
545
546
547
548
549
550
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}
555
556
557
558
559
560
561
562
563
564
565
566
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
600
601
602
603
604
605
606
607
608
609
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
	unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
615
616
617
618
619
620
621
622
623
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
	unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
629
#ifndef CONFIG_SMP
/* on UP there is only one CPU, so just run @fn in place */
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
/* run @fn(@arg) on @cpu via a workqueue and return its result; may sleep */
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
/* NOTE(review): _safe presumably differs in CPU-hotplug handling — see kernel/workqueue.c */
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif
643
644#ifdef CONFIG_FREEZER
645extern void freeze_workqueues_begin(void);
646extern bool freeze_workqueues_busy(void);
647extern void thaw_workqueues(void);
648#endif
649
#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
/* sysfs disabled: registration is a successful no-op */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif
656
#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else
/* watchdog disabled: touching it is a no-op */
static inline void wq_watchdog_touch(int cpu) { }
#endif
662
663#ifdef CONFIG_SMP
664int workqueue_prepare_cpu(unsigned int cpu);
665int workqueue_online_cpu(unsigned int cpu);
666int workqueue_offline_cpu(unsigned int cpu);
667#endif
668
669int __init workqueue_init_early(void);
670int __init workqueue_init(void);
671
672#endif
673