/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/rh_kabi.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif

	RH_KABI_USE(1, void *bdi_wb_backptr)
	RH_KABI_RESERVE(2)
	RH_KABI_RESERVE(3)
	RH_KABI_RESERVE(4)
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
	RH_KABI_RESERVE(1)
	RH_KABI_RESERVE(2)
	RH_KABI_RESERVE(3)
	RH_KABI_RESERVE(4)
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool.
	 * It only modifies how apply_workqueue_attrs() selects pools and thus
	 * doesn't participate in pool hash calculations or equality
	 * comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
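
/*
 * Illustrative sketch (not part of this header): a work handler can recover
 * the containing delayed_work, and from there the driver structure it is
 * embedded in, via to_delayed_work() and container_of().  The names
 * my_device and my_poll_fn below are hypothetical.
 *
 *	struct my_device {
 *		struct delayed_work poll_work;
 *		int state;
 *	};
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_device *dev = container_of(dwork, struct my_device,
 *						     poll_work);
 *		// dev->state may be used here; the handler runs in
 *		// process context.
 *	}
 */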

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting the _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
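
/*
 * Illustrative sketch (not part of this header): declaring a statically
 * initialized work item and scheduling it on the system workqueue.  The
 * names my_handler and my_work are hypothetical.
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	// e.g. from an interrupt handler:
 *	schedule_work(&my_work);
 */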

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
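
/*
 * Illustrative sketch (not part of this header): runtime initialization of
 * work items embedded in a dynamically allocated structure.  The names
 * my_dev, my_irq_handler and my_retry_handler are hypothetical.
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *		struct delayed_work retry_work;
 *	};
 *
 *	// during probe/setup:
 *	INIT_WORK(&dev->irq_work, my_irq_handler);
 *	INIT_DELAYED_WORK(&dev->retry_work, my_retry_handler);
 *
 * For objects that live on the stack, the *_ONSTACK variants must be paired
 * with destroy_work_on_stack()/destroy_delayed_work_on_stack() before the
 * frame is released so that debugobjects state stays consistent.
 */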

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which
	 * in turn may lead to more scheduling choices which are
	 * sub-optimal in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel
	 * parameter is specified.  Per-cpu workqueues which are identified
	 * to contribute significantly to power consumption are marked with
	 * this flag; enabling the power_efficient mode leads to noticeable
	 * power saving at the cost of a small performance disadvantage.
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and are
 * converted into WQ_UNBOUND variants if 'wq_power_efficient' is enabled;
 * otherwise, they are the same as their non-power-efficient counterparts -
 * e.g. system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned int flags,
					 int max_active, ...);

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
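
/*
 * Illustrative sketch (not part of this header): a typical workqueue
 * lifecycle.  The names my_wq and my_work are hypothetical.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);	// drains and frees the workqueue
 */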

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
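
/*
 * Illustrative sketch (not part of this header): queueing an rcu_work runs
 * the handler after an RCU grace period has elapsed, which is handy for
 * freeing objects that RCU readers may still reference.  The names my_obj
 * and my_free_fn are hypothetical.
 *
 *	struct my_obj {
 *		struct rcu_work free_work;
 *	};
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, free_work);
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->free_work, my_free_fn);
 *	queue_rcu_work(system_wq, &obj->free_work);
 */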

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
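
/*
 * Illustrative sketch (not part of this header): mod_delayed_work() can be
 * used as a debounce timer - every call pushes the execution time back,
 * queueing the work if it wasn't already pending.  The name my_dev is
 * hypothetical.
 *
 *	// called on every input event; the handler only runs once the
 *	// events have been quiet for 100ms:
 *	mod_delayed_work(system_wq, &my_dev->debounce_work,
 *			 msecs_to_jiffies(100));
 */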

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
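
/*
 * Illustrative sketch (not part of this header): schedule_work() is commonly
 * used to defer the slow part of an interrupt handler to process context.
 * The names my_irq and my_dev are hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		// ack the hardware quickly here, then defer the heavy
 *		// lifting to the work handler:
 *		schedule_work(&dev->bh_work);
 *		return IRQ_HANDLED;
 *	}
 */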

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
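
/*
 * Illustrative sketch (not part of this header): a periodic poll is often
 * built by having the handler re-arm its own delayed work.  The names
 * my_poll and my_dev are hypothetical; cancel_delayed_work_sync() stops
 * the cycle on teardown.
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, poll_work);
 *
 *		// sample hardware state ...
 *		schedule_delayed_work(&dev->poll_work, HZ);	// again in 1s
 *	}
 */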

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif
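
/*
 * Illustrative sketch (not part of this header): work_on_cpu() runs @fn
 * synchronously on @cpu and returns its result; on !SMP builds it simply
 * calls @fn locally, as the inlines above show.  The name my_percpu_read
 * is hypothetical.
 *
 *	static long my_percpu_read(void *arg)
 *	{
 *		// runs on the target CPU in process context
 *		return 0;
 *	}
 *
 *	long ret = work_on_cpu(2, my_percpu_read, NULL);
 */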

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else
static inline void wq_watchdog_touch(int cpu) { }
#endif

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif