/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special CPU ID: not bound to any specific CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, the high bits of its data encode
	 * the last pool it was on.  Cap the pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/*
 * A struct for workqueue attributes.  This can be used to change
 * attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a
	 * worker_pool.  It only modifies how apply_workqueue_attrs()
	 * selects pools and thus doesn't participate in pool hash
	 * calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
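
/*
 * Example (editor's illustrative sketch, not part of the original
 * header; the my_dev/my_poll_fn names are hypothetical).  A delayed
 * work handler receives a work_struct pointer and can recover its
 * container via to_delayed_work() and container_of():
 *
 *	struct my_dev {
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_dev *dev = container_of(dwork, struct my_dev,
 *						  poll_work);
 *		...
 *	}
 */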

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
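
/*
 * Example (editor's illustrative sketch; the my_* names are
 * hypothetical).  A statically declared work item needs only a handler
 * and can be queued directly:
 *
 *	static void my_fn(struct work_struct *work)
 *	{
 *		pr_info("my work ran\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_fn);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_fn);
 *
 *	schedule_work(&my_work);
 *	schedule_delayed_work(&my_dwork, msecs_to_jiffies(100));
 */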

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
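
/*
 * Example (editor's illustrative sketch; the my_* names are
 * hypothetical).  Work items embedded in dynamically allocated objects
 * are set up with the INIT_*() variants, typically at object-creation
 * time:
 *
 *	struct my_ctx {
 *		struct work_struct io_work;
 *		struct delayed_work retry_work;
 *	};
 *
 *	static void my_setup(struct my_ctx *ctx)
 *	{
 *		INIT_WORK(&ctx->io_work, my_io_fn);
 *		INIT_DELAYED_WORK(&ctx->retry_work, my_retry_fn);
 *	}
 *
 * The _ONSTACK variants pair with destroy_work_on_stack() and
 * destroy_delayed_work_on_stack() for items living on the stack.
 */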

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel
	 * parameter is specified.  Per-cpu workqueues which are identified
	 * to contribute significantly to power consumption opt in through
	 * this flag.
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and are
 * converted to WQ_UNBOUND variants if the workqueue.power_efficient
 * kernel parameter is enabled; otherwise, they are the same as their
 * non-power-efficient counterparts - e.g. system_power_efficient_wq
 * is identical to system_wq if workqueue.power_efficient is disabled.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args...: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * The __lock_name macro dance is to guarantee that a single
 * lock_class_key doesn't end up with different names, which isn't
 * allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = "(wq_completion)"#fmt#args;			\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
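
/*
 * Example (editor's illustrative sketch; my_wq and my_work are
 * hypothetical).  Allocating a dedicated queue and tearing it down
 * again; destroy_workqueue() drains remaining work first:
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);
 */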

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
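
/*
 * Example (editor's illustrative sketch; the my_* names are
 * hypothetical).  On teardown, use the _sync cancel variants before
 * freeing the object the handlers dereference, so no handler can still
 * be running or become pending afterwards:
 *
 *	static void my_teardown(struct my_dev *dev)
 *	{
 *		cancel_delayed_work_sync(&dev->poll_work);
 *		cancel_work_sync(&dev->irq_work);
 *		kfree(dev);
 *	}
 */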

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
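
/*
 * Example (editor's illustrative sketch; my_wq and dev are
 * hypothetical).  A %false return only means the PENDING bit was
 * already set; the handler is still guaranteed to run at least once
 * afterwards:
 *
 *	if (!queue_work(my_wq, &dev->io_work))
 *		pr_debug("io_work was already pending\n");
 */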

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
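
/*
 * Example (editor's illustrative sketch; the my_* names are
 * hypothetical).  Because mod_delayed_work() re-arms the timer whether
 * or not the work was already pending, it is a natural fit for
 * debouncing bursts of events:
 *
 *	static void my_event(struct my_dev *dev)
 *	{
 *		mod_delayed_work(system_wq, &dev->commit_work,
 *				 msecs_to_jiffies(50));
 *	}
 *
 * Each call pushes the deadline back, so the handler runs once, 50ms
 * after the last event in a burst.
 */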

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
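
/*
 * Example (editor's illustrative sketch; the my_* names are
 * hypothetical).  A common pattern is to punt non-atomic processing
 * from an interrupt handler to process context via the system
 * workqueue; queue_work_on() and friends may be called from any
 * context, including hard IRQ:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		schedule_work(&dev->irq_work);
 *		return IRQ_HANDLED;
 *	}
 */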

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
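
/*
 * Example (editor's illustrative sketch; the my_* names are
 * hypothetical).  A self-rearming poll loop built on the system
 * workqueue:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, poll_work);
 *
 *		my_sample(dev);
 *		if (!dev->stopping)
 *			schedule_delayed_work(&dev->poll_work, HZ);
 *	}
 *
 * Stop it with cancel_delayed_work_sync(), which also copes with the
 * work re-queueing itself.
 */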

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif
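
/*
 * Example (editor's illustrative sketch; my_fn is hypothetical).
 * work_on_cpu() runs a function synchronously in a worker bound to the
 * chosen CPU and returns its result; on !SMP it simply runs inline:
 *
 *	static long my_fn(void *arg)
 *	{
 *		return smp_processor_id();
 *	}
 *
 *	long cpu = work_on_cpu(2, my_fn, NULL);
 */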

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else
static inline void wq_watchdog_touch(int cpu) { }
#endif

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif	/* _LINUX_WORKQUEUE_H */