/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a
	 * worker_pool.  It only modifies how apply_workqueue_attrs()
	 * selects pools and thus doesn't participate in pool hash
	 * calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     0, (unsigned long)&(n),		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
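
/*
 * Illustrative sketch (not part of this header; names are hypothetical):
 * a statically declared work item, queued from atomic context and run
 * later in process context by a worker thread.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("ran in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	(later, e.g. from an interrupt handler)
 *	schedule_work(&my_work);
 */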

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
			      (unsigned long)(_work),			\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       delayed_work_timer_fn,		\
				       (unsigned long)(_work),		\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
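
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * runtime initialization of a delayed work item embedded in a larger
 * structure, using to_delayed_work()/container_of() to recover the
 * containing object inside the handler.
 *
 *	struct my_dev {
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *md = container_of(to_delayed_work(work),
 *						 struct my_dev, poll_work);
 *		(poll the hardware here)
 *	}
 *
 *	INIT_DELAYED_WORK(&md->poll_work, my_poll_fn);
 *	schedule_delayed_work(&md->poll_work, HZ);
 */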

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are found to contribute
	 * significantly to power consumption are marked with this flag,
	 * and enabling the power_efficient mode leads to noticeable power
	 * saving at the cost of a small performance penalty.
	 *
	 * http://lwn.net/Articles/538952/ discusses this topic.
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args...: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = #fmt#args;					\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif
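
/*
 * Illustrative sketch (hypothetical names, not part of this header): a
 * driver-private workqueue that is unbound and freezable, using the
 * default max_active limit.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *
 *	(on teardown)
 *	destroy_workqueue(my_wq);
 */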

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
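
/*
 * Illustrative sketch (hypothetical name): an ordered workqueue executes
 * at most one work item at a time, in queueing order, which makes it the
 * preferred replacement for create_singlethread_workqueue() in new code.
 *
 *	struct workqueue_struct *seq_wq;
 *
 *	seq_wq = alloc_ordered_workqueue("seq_wq", WQ_MEM_RECLAIM);
 *	if (!seq_wq)
 *		return -ENOMEM;
 */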

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
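
/*
 * Illustrative teardown sketch (hypothetical names): on device removal,
 * cancel pending work and wait for any in-flight handler to finish before
 * freeing the memory it touches.  destroy_workqueue() drains the queue
 * itself before tearing it down.
 *
 *	cancel_delayed_work_sync(&md->poll_work);
 *	cancel_work_sync(&md->reset_work);
 *	destroy_workqueue(my_wq);
 */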

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
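
/*
 * Illustrative sketch (hypothetical names): mod_delayed_work() is a
 * natural fit for debouncing - every call pushes the timeout out again,
 * so the handler runs once, @delay jiffies after the last event.
 *
 *	(on every incoming event)
 *	mod_delayed_work(system_wq, &md->idle_work, msecs_to_jiffies(100));
 */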

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
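
/*
 * Illustrative sketch (hypothetical handler): a periodic poller that
 * re-arms itself from its own handler.  Stop it with
 * cancel_delayed_work_sync(), which also catches a concurrent handler
 * that is about to re-arm.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		(poll the hardware here)
 *		schedule_delayed_work(to_delayed_work(work), HZ);
 *	}
 */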

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif
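
/*
 * Illustrative sketch (hypothetical function): run a function
 * synchronously on a specific CPU and collect its return value; on !SMP
 * builds this degenerates to a direct call.
 *
 *	static long my_percpu_read(void *arg)
 *	{
 *		return (read a CPU-local resource here);
 *	}
 *
 *	ret = work_on_cpu(2, my_percpu_read, &req);
 */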

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else
static inline void wq_watchdog_touch(int cpu) { }
#endif

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif	/* _LINUX_WORKQUEUE_H */