/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
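
/*
 * Example (illustrative sketch only; struct my_dev and my_dev_work_fn are
 * hypothetical names, not part of this API): the usual pattern is to embed
 * a work_struct in a larger object and recover the container with
 * container_of() from the handler:
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *		int pending_events;
 *	};
 *
 *	static void my_dev_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  irq_work);
 *
 *		(process dev->pending_events in process context)
 *	}
 */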

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a
	 * worker_pool.  It only modifies how apply_workqueue_attrs()
	 * selects pools and thus doesn't participate in pool hash
	 * calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
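
/*
 * Example (illustrative only; my_poll_fn, struct my_ctx and its poll_work
 * member are made-up names): a delayed work handler receives a plain
 * work_struct pointer and can recover the enclosing delayed_work, and from
 * there its own context:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_ctx *ctx = container_of(dwork, struct my_ctx,
 *						  poll_work);
 *
 *		(poll the hardware, then optionally re-arm via
 *		 schedule_delayed_work(dwork, ...))
 *	}
 */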

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     0, (unsigned long)&(n),		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
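
/*
 * Example (sketch; flush_logs, poll_logs and the work item names are
 * hypothetical): DECLARE_WORK() defines a fully initialized, file-scope
 * work item that can be queued without a runtime INIT_WORK() call:
 *
 *	static void flush_logs(struct work_struct *unused);
 *	static void poll_logs(struct work_struct *unused);
 *
 *	static DECLARE_WORK(log_flush_work, flush_logs);
 *	static DECLARE_DELAYED_WORK(log_poll_work, poll_logs);
 *
 *	schedule_work(&log_flush_work);
 *	schedule_delayed_work(&log_poll_work, HZ);
 */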

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
			      (unsigned long)(_work),			\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       delayed_work_timer_fn,		\
				       (unsigned long)(_work),		\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
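
/*
 * Example (sketch; the dev/my_dev_* names are hypothetical): objects
 * allocated at runtime initialize their embedded work items explicitly,
 * typically at probe time.  The _ONSTACK variants (paired with
 * destroy_work_on_stack()) are only for work items living on the stack:
 *
 *	INIT_WORK(&dev->reset_work, my_dev_reset_fn);
 *	INIT_DELAYED_WORK(&dev->poll_work, my_dev_poll_fn);
 *	INIT_DEFERRABLE_WORK(&dev->gc_work, my_dev_gc_fn);
 */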

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)						\
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w)						\
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified to
	 * contribute significantly to power consumption are marked with
	 * this flag, and enabling the power_efficient mode leads to
	 * noticeable power saving at the cost of a small performance
	 * disadvantage.
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and are
 * converted into WQ_UNBOUND variants if 'wq_power_efficient' is enabled;
 * otherwise, they are the same as their non-power-efficient counterparts,
 * e.g. system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args...: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = #fmt#args;					\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif
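
/*
 * Example (sketch; "mydrv" and the surrounding probe code are
 * hypothetical): allocate a dedicated workqueue at probe time and free it
 * on removal.  The return value must be checked for NULL:
 *
 *	wq = alloc_workqueue("mydrv/%s", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0,
 *			     dev_name(dev));
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	(... queue work on wq ...)
 *
 *	destroy_workqueue(wq);
 */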

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
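
/*
 * Example (sketch; the my_dev fields are hypothetical): a typical teardown
 * sequence cancels pending and delayed work synchronously before freeing
 * the containing object, so no handler can run against freed memory:
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 *	cancel_work_sync(&dev->reset_work);
 *	destroy_workqueue(dev->wq);
 *	kfree(dev);
 */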

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
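
/*
 * Example (sketch; dev->wq and dev->reset_work are hypothetical):
 * queue_work() is the common submission path for a dedicated workqueue;
 * the boolean result tells whether the item was newly queued or was
 * already pending:
 *
 *	if (!queue_work(dev->wq, &dev->reset_work))
 *		pr_debug("reset already pending\n");
 */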

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
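
/*
 * Example (sketch; dev->wq and dev->poll_work are hypothetical): schedule
 * a poll 100ms from now.  Use mod_delayed_work() below instead when the
 * timer should be (re)armed to a new deadline regardless of whether the
 * item is already pending:
 *
 *	queue_delayed_work(dev->wq, &dev->poll_work,
 *			   msecs_to_jiffies(100));
 */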

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
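
/*
 * Example (sketch; my_irq_handler and struct my_dev are hypothetical):
 * schedule_work() is the simplest way to defer work from atomic context
 * to process context via the shared system_wq:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		schedule_work(&dev->irq_work);
 *		return IRQ_HANDLED;
 *	}
 */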

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif
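
/*
 * Example (sketch; read_state_fn, target_cpu and req are hypothetical):
 * work_on_cpu() runs @fn synchronously on @cpu and returns its result;
 * on !SMP builds it simply calls @fn locally:
 *
 *	static long read_state_fn(void *arg)
 *	{
 *		(read per-cpu state into *arg)
 *		return 0;
 *	}
 *
 *	ret = work_on_cpu(target_cpu, read_state_fn, &req);
 */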

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else
static inline void wq_watchdog_touch(int cpu) { }
#endif

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif	/* _LINUX_WORKQUEUE_H */