/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special CPU IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_END		= NR_CPUS + 1,

	/*
	 * The low WORK_STRUCT_FLAG_BITS bits of ->data carry the flags and
	 * the flush color; when ->data points to a pwq, the pwq therefore
	 * must be aligned to 1 << WORK_STRUCT_FLAG_BITS bytes (256 w/o
	 * debugobjects).
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),

	/*
	 * When a work item is off queue, the high bits of ->data encode
	 * the pool it was last associated with.  Cap the pool ID at 31
	 * bits and use the highest value to indicate that no pool is
	 * associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/*
 * A struct for workqueue attributes.  This can be used to change
 * attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	int			nice;		/* nice level */
	cpumask_var_t		cpumask;	/* allowed CPUs */
	bool			no_numa;	/* disable NUMA affinity */
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) { \
	.data = WORK_DATA_STATIC_INIT(), \
	.entry = { &(n).entry, &(n).entry }, \
	.func = (f), \
	__WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
	.work = __WORK_INITIALIZER((n).work, (f)), \
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn, \
				     0, (unsigned long)&(n), \
				     (tflags) | TIMER_IRQSAFE), \
	}

#define DECLARE_WORK(n, f) \
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f) \
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f) \
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
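
/*
 * Example (illustrative only, not part of this header): a statically
 * declared work item.  example_work and example_work_fn are
 * hypothetical names.
 *
 *	static void example_work_fn(struct work_struct *work)
 *	{
 *		pr_info("example_work ran\n");
 *	}
 *
 *	static DECLARE_WORK(example_work, example_work_fn);
 *
 *	// from almost any context, including hard interrupt context:
 *	schedule_work(&example_work);
 */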

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func) \
	do { \
		(_work)->func = (_func); \
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func) \
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_work((_work), _onstack); \
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry); \
		PREPARE_WORK((_work), (_func)); \
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack) \
	do { \
		__init_work((_work), _onstack); \
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		INIT_LIST_HEAD(&(_work)->entry); \
		PREPARE_WORK((_work), (_func)); \
	} while (0)
#endif

#define INIT_WORK(_work, _func) \
	do { \
		__INIT_WORK((_work), (_func), 0); \
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func) \
	do { \
		__INIT_WORK((_work), (_func), 1); \
	} while (0)

#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
	do { \
		INIT_WORK(&(_work)->work, (_func)); \
		__setup_timer(&(_work)->timer, delayed_work_timer_fn, \
			      (unsigned long)(_work), \
			      (_tflags) | TIMER_IRQSAFE); \
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
	do { \
		INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
		__setup_timer_on_stack(&(_work)->timer, \
				       delayed_work_timer_fn, \
				       (unsigned long)(_work), \
				       (_tflags) | TIMER_IRQSAFE); \
	} while (0)

#define INIT_DELAYED_WORK(_work, _func) \
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func) \
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
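
/*
 * Example (illustrative only): runtime initialization of a work item
 * embedded in a driver structure.  struct example_dev and
 * example_reset_fn are hypothetical.
 *
 *	struct example_dev {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void example_reset_fn(struct work_struct *work)
 *	{
 *		struct example_dev *dev =
 *			container_of(work, struct example_dev, reset_work);
 *		// ... reset the hardware ...
 *	}
 *
 *	// in the probe/setup path:
 *	INIT_WORK(&dev->reset_work, example_reset_fn);
 */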

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
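
/*
 * Example (illustrative only): pending checks are only hints since the
 * state may change right after the test; the queueing functions already
 * refuse to double-queue.  dev->poll_work is hypothetical.
 *
 *	if (!delayed_work_pending(&dev->poll_work))
 *		schedule_delayed_work(&dev->poll_work, HZ);
 */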

/**
 * work_clear_pending - for internal use only,
 * mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality; however, they
	 * also pin work items to specific CPUs, which can keep otherwise
	 * idle CPUs awake and thus increase power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel
	 * parameter is specified.  Enabling power_efficient mode leads to
	 * noticeable power saving at the cost of a small performance
	 * penalty.
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE \
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and are
 * converted into WQ_UNBOUND variants if the wq_power_efficient mode is
 * enabled; otherwise, they are identical to their non-power-efficient
 * counterparts.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
	return system_wq;
}

static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
	return system_freezable_wq;
}

/* equivalent to system_wq and system_freezable_wq, deprecated */
#define system_nrt_wq			__system_nrt_wq()
#define system_nrt_freezable_wq		__system_nrt_freezable_wq()

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...) \
({ \
	static struct lock_class_key __key; \
	const char *__lock_name; \
	\
	if (__builtin_constant_p(fmt)) \
		__lock_name = (fmt); \
	else \
		__lock_name = #fmt; \
	\
	__alloc_workqueue_key((fmt), (flags), (max_active), \
			      &__key, __lock_name, ##args); \
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...) \
	__alloc_workqueue_key((fmt), (flags), (max_active), \
			      NULL, NULL, ##args)
#endif
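
/*
 * Example (illustrative only): allocating and destroying a dedicated
 * workqueue.  The name "example" and the flags chosen are arbitrary.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("example", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &some_work);
 *	...
 *	destroy_workqueue(wq);
 */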

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...) \
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name) \
	alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name) \
	alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
			1, (name))
#define create_singlethread_workqueue(name) \
	alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, (name))

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
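
/*
 * Example (illustrative only): changing the attributes of an unbound
 * workqueue.  The nice value and CPU mask used here are arbitrary.
 *
 *	struct workqueue_attrs *attrs;
 *	int ret = -ENOMEM;
 *
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, cpumask_of(0));
 *		ret = apply_workqueue_attrs(wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */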

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
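
/*
 * Example (illustrative only): a typical teardown sequence which
 * guarantees that no work item is pending or executing before the
 * backing resources go away.  dev and its members are hypothetical.
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 *	cancel_work_sync(&dev->reset_work);
 *	destroy_workqueue(dev->wq);	// drains remaining work items
 */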

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
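
/*
 * Example (illustrative only): debouncing with mod_delayed_work().
 * Every call pushes execution of dev->idle_work another second into
 * the future, queueing it first if necessary.  The names are
 * hypothetical.
 *
 *	mod_delayed_work(system_wq, &dev->idle_work, HZ);
 */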

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
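
/*
 * Example (illustrative only): the classic top-half/bottom-half split.
 * The interrupt handler only acknowledges the hardware and defers the
 * heavy lifting to a work item.  All names are hypothetical.
 *
 *	static irqreturn_t example_irq(int irq, void *data)
 *	{
 *		struct example_dev *dev = data;
 *
 *		// ack the interrupt, then defer processing
 *		schedule_work(&dev->rx_work);
 *		return IRQ_HANDLED;
 *	}
 */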

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
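
/*
 * Example (illustrative only): a self-rearming poller.  The work item
 * requeues itself until dev->stop is set; cancel_delayed_work_sync()
 * stops it reliably even while it is requeueing itself.  All names are
 * hypothetical.
 *
 *	static void example_poll_fn(struct work_struct *work)
 *	{
 *		struct example_dev *dev = container_of(to_delayed_work(work),
 *					struct example_dev, poll_work);
 *
 *		// ... sample hardware state ...
 *		if (!dev->stop)
 *			schedule_delayed_work(&dev->poll_work, HZ);
 *	}
 */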

/**
 * keventd_up - is workqueue initialized yet?
 */
static inline bool keventd_up(void)
{
	return system_wq != NULL;
}

/*
 * Like cancel_delayed_work(), but uses del_timer() instead of
 * del_timer_sync().  This means, if it returns %false, the timer
 * function may still be running and the queueing may be in progress.
 */
static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{
	return flush_work(work);
}

/* used to be different but now identical to flush_delayed_work(), deprecated */
static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
{
	return flush_delayed_work(dwork);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
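
/*
 * Example (illustrative only): running a function on a specific CPU in
 * process context and collecting its return value.  example_read_state
 * is hypothetical.
 *
 *	static long example_read_state(void *arg)
 *	{
 *		// executes on the CPU passed to work_on_cpu()
 *		struct example_dev *dev = arg;
 *
 *		return dev->per_cpu_state;
 *	}
 *
 *	long v = work_on_cpu(2, example_read_state, dev);
 */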

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#endif /* _LINUX_WORKQUEUE_H */