/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */
#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/rh_kabi.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

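/*
 * work_struct->data packs the WORK_STRUCT_* flag bits into its low bits;
 * the remaining bits hold either a pool_workqueue pointer (when
 * WORK_STRUCT_PWQ is set) or off-queue information such as the ID of the
 * last worker pool the item ran on.
 */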
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT = 0,
	WORK_STRUCT_DELAYED_BIT = 1,
	WORK_STRUCT_PWQ_BIT = 2,
	WORK_STRUCT_LINKED_BIT = 3,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT = 4,
	WORK_STRUCT_COLOR_SHIFT = 5,
#else
	WORK_STRUCT_COLOR_SHIFT = 4,
#endif

	WORK_STRUCT_COLOR_BITS = 4,

	WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC = 0,
#endif

	WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR = WORK_NR_COLORS,

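	/* special CPU ID: the work item is not bound to any specific CPU */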
	WORK_CPU_UNBOUND = NR_CPUS,

	WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
				WORK_STRUCT_COLOR_BITS,

	WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),

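	/*
	 * While a work item is off queue (!WORK_STRUCT_PWQ), the upper bits
	 * of ->data record the ID of the last worker pool it ran on.  The
	 * pool ID field is capped at 31 bits; the all-ones value
	 * (WORK_OFFQ_POOL_NONE) means no pool is associated.
	 */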
	WORK_OFFQ_FLAG_BITS = 1,
	WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,

	WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	WORK_BUSY_PENDING = 1 << 0,
	WORK_BUSY_RUNNING = 1 << 1,

	WORKER_DESC_LEN = 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
	RH_KABI_RESERVE(1)
	RH_KABI_RESERVE(2)
	RH_KABI_RESERVE(3)
	RH_KABI_RESERVE(4)
};

#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	struct workqueue_struct *wq;
	int cpu;
	RH_KABI_RESERVE(1)
	RH_KABI_RESERVE(2)
	RH_KABI_RESERVE(3)
	RH_KABI_RESERVE(4)
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	struct workqueue_struct *wq;
};

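/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 * @nice: nice level
 * @cpumask: allowed CPUs
 * @no_numa: disable NUMA affinity
 *
 * This can be used to change attributes of an unbound workqueue.
 */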
struct workqueue_attrs {
	int nice;

	cpumask_var_t cpumask;

	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) { \
	.data = WORK_DATA_STATIC_INIT(), \
	.entry = { &(n).entry, &(n).entry }, \
	.func = (f), \
	__WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
	.work = __WORK_INITIALIZER((n).work, (f)), \
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn, \
				     (tflags) | TIMER_IRQSAFE), \
	}

#define DECLARE_WORK(n, f) \
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f) \
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f) \
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

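/*
 * Initialize all of a work item in one go.  ->data is assigned directly
 * rather than through atomic_long_set() so the compiler can generate
 * better code for the constant initializer.
 */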
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_work((_work), _onstack); \
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry); \
		(_work)->func = (_func); \
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack) \
	do { \
		__init_work((_work), _onstack); \
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		INIT_LIST_HEAD(&(_work)->entry); \
		(_work)->func = (_func); \
	} while (0)
#endif

#define INIT_WORK(_work, _func) \
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func) \
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
	do { \
		INIT_WORK(&(_work)->work, (_func)); \
		__init_timer(&(_work)->timer, \
			     delayed_work_timer_fn, \
			     (_tflags) | TIMER_IRQSAFE); \
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
	do { \
		INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
		__init_timer_on_stack(&(_work)->timer, \
				      delayed_work_timer_fn, \
				      (_tflags) | TIMER_IRQSAFE); \
	} while (0)

#define INIT_DELAYED_WORK(_work, _func) \
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func) \
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func) \
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func) \
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

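/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */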
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

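/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */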
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

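/*
 * Workqueue flags and constants.
 */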
enum {
	WQ_UNBOUND = 1 << 1,
	WQ_FREEZABLE = 1 << 2,
	WQ_MEM_RECLAIM = 1 << 3,
	WQ_HIGHPRI = 1 << 4,
	WQ_CPU_INTENSIVE = 1 << 5,
	WQ_SYSFS = 1 << 6,

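	/*
	 * Per-cpu workqueues are generally preferred because of cache
	 * locality, but they also keep otherwise idle CPUs busy.  A
	 * workqueue marked WQ_POWER_EFFICIENT is made unbound when the
	 * power-efficient mode is enabled (CONFIG_WQ_POWER_EFFICIENT_DEFAULT
	 * or the workqueue.power_efficient boot parameter), trading cache
	 * locality for power savings.
	 */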
	WQ_POWER_EFFICIENT = 1 << 7,

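	/* internal flags, used only by the workqueue implementation */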
	__WQ_DRAINING = 1 << 16,
	__WQ_ORDERED = 1 << 17,
	__WQ_LEGACY = 1 << 18,
	__WQ_ORDERED_EXPLICIT = 1 << 19,

	WQ_MAX_ACTIVE = 512,
	WQ_MAX_UNBOUND_PER_CPU = 4,
	WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};

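/* unbound workqueues aren't per-CPU, so scale max_active with the number of CPUs */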
#define WQ_UNBOUND_MAX_ACTIVE \
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

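/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on](); work items
 * queued on it should not run for too long.  system_highpri_wq,
 * system_long_wq, system_unbound_wq, system_freezable_wq and the
 * power-efficient variants provide the corresponding WQ_* behaviours for
 * work items that need them.
 */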
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

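/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Return: Pointer to the allocated workqueue on success, %NULL on failure.
 */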
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned int flags,
					 int max_active, ...);

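/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.
 *
 * Return: Pointer to the allocated workqueue on success, %NULL on failure.
 */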
#define alloc_ordered_workqueue(fmt, flags, args...) \
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name) \
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name) \
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name) \
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

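/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */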
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

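/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */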
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

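/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */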
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

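/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */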
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

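/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */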
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

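/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.  This can easily deadlock if the caller holds a lock that a
 * queued work item also needs; when only one specific work item matters,
 * prefer cancel_work_sync() or cancel_delayed_work_sync() instead.
 */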
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

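/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */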
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

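/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */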
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else
static inline void wq_watchdog_touch(int cpu) { }
#endif

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif