/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};
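
/*
 * Informal sketch (added as an aid, not from the original header): with
 * CONFIG_DEBUG_OBJECTS_WORK=n the work->data word decodes roughly as
 * follows; the enum above is authoritative.
 *
 *	bits 0..3: WORK_STRUCT_{PENDING,DELAYED,PWQ,LINKED} flags
 *	if WORK_STRUCT_PWQ is set:
 *		bits 4..7: flush color
 *		bits 8.. : pointer to the pool_workqueue (256-byte aligned)
 *	otherwise (off queue):
 *		bit  4   : WORK_OFFQ_CANCELING
 *		bits 5.. : ID of the last pool the item ran on, or
 *			   WORK_OFFQ_POOL_NONE
 */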

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
	 * only modifies how apply_workqueue_attrs() selects pools and thus
	 * doesn't participate in pool hash calculations or attribute comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
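
/*
 * Illustrative sketch (not part of the original header): inside the
 * handler of a delayed_work, to_delayed_work() recovers the containing
 * delayed_work so the item can re-arm itself; "my_poll_fn" and the HZ
 * period are made-up.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		schedule_delayed_work(dwork, HZ);
 *	}
 */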

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

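/*
 * Illustrative example (not part of the original header): a statically
 * declared work item queued from interrupt context; "my_work_fn" and
 * "my_irq" are made-up names.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("bottom half running\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	static irqreturn_t my_irq(int irq, void *dev)
 *	{
 *		schedule_work(&my_work);
 *		return IRQ_HANDLED;
 *	}
 */
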
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

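/*
 * Illustrative example (not part of the original header): embedding a
 * work_struct in a driver structure, initializing it at runtime and
 * recovering the container with container_of(); "struct my_dev" and
 * "my_reset_fn" are made-up names.
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *		int unit;
 *	};
 *
 *	static void my_reset_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  reset_work);
 *		pr_info("resetting unit %d\n", dev->unit);
 *	}
 *
 *	INIT_WORK(&dev->reset_work, my_reset_fn);
 */
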
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)						\
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w)						\
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified to
	 * contribute significantly to power consumption are marked with
	 * this flag, and enabling the power_efficient mode leads to
	 * noticeable power saving at the cost of a small performance
	 * disadvantage.
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* default hard cap on in-flight work items */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned int flags,
					 int max_active, ...);
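
/*
 * Illustrative example (not part of the original header): allocating a
 * private workqueue usable from memory-reclaim paths and tearing it
 * down; "my_wq" and "my_work" are made-up names.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);
 */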

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

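/*
 * Note (added as an aid): the create_*_workqueue() macros above are
 * legacy wrappers (note __WQ_LEGACY); new code generally calls
 * alloc_workqueue() or alloc_ordered_workqueue() directly.  A minimal
 * sketch with a made-up name:
 *
 *	struct workqueue_struct *ordered_wq;
 *
 *	ordered_wq = alloc_ordered_workqueue("my_ordered", WQ_MEM_RECLAIM);
 *
 * Work items queued on ordered_wq then execute one at a time, in
 * queueing order.
 */
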
extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

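/*
 * Illustrative example (not part of the original header): queue_rcu_work()
 * runs the work item after an RCU grace period, e.g. to free an object
 * that RCU readers may still reference; "struct my_obj" and "my_free_fn"
 * are made-up names.
 *
 *	struct my_obj {
 *		struct rcu_work rwork;
 *	};
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, rwork);
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_free_fn);
 *	queue_rcu_work(system_wq, &obj->rwork);
 */
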
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
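
/*
 * Illustrative example (not part of the original header):
 * mod_delayed_work() (re)arms a delayed work, which is handy for
 * debouncing bursts of events so that only the last event within the
 * window triggers the handler; "my_dwork" and the 100ms window are
 * made-up.
 *
 *	mod_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
 */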

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

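/*
 * Illustrative example (not part of the original header): a self-rearming
 * poller on the system workqueue, stopped with cancel_delayed_work_sync()
 * on teardown; "my_poll", "my_poll_work" and "sample_hardware" are
 * made-up names.
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		sample_hardware();
 *		schedule_delayed_work(to_delayed_work(work), HZ);
 *	}
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *	schedule_delayed_work(&my_poll_work, HZ);
 *	...
 *	cancel_delayed_work_sync(&my_poll_work);
 */
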
#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif
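
/*
 * Illustrative example (not part of the original header): work_on_cpu()
 * runs @fn synchronously on the given CPU and returns its result, which
 * is useful when an operation must execute on a specific CPU;
 * "my_cpu_local_read" is a made-up callback.
 *
 *	static long my_cpu_local_read(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	long ret;
 *
 *	ret = work_on_cpu(2, my_cpu_local_read, NULL);
 */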

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else
static inline void wq_watchdog_touch(int cpu) { }
#endif

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif