/*
 * workqueue.h --- work queue handling for Linux.
 */
5#ifndef _LINUX_WORKQUEUE_H
6#define _LINUX_WORKQUEUE_H
7
8#include <linux/timer.h>
9#include <linux/linkage.h>
10#include <linux/bitops.h>
11#include <linux/lockdep.h>
12#include <linux/threads.h>
13#include <linux/atomic.h>
14#include <linux/cpumask.h>
15
/* opaque to users of this header */
struct workqueue_struct;

struct work_struct;
/* callback type invoked for each work item; the item itself is the argument */
typedef void (*work_func_t)(struct work_struct *work);
/* timer callback for delayed_work; __data is the delayed_work cast to long */
void delayed_work_timer_fn(unsigned long __data);
21
22
23
24
25
26#define work_data_bits(work) ((unsigned long *)(&(work)->data))
27
/*
 * The low bits of work_struct::data are flags describing the work item.
 * The remaining high bits hold either a pool_workqueue pointer (when
 * WORK_STRUCT_PWQ is set) or, while the item is off queue, a pool ID
 * plus the WORK_OFFQ_* flags.
 */
enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ	= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is reserved as "no color", used for work items
	 * which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu ID meaning "no specific CPU" */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Total number of flag + color bits.  A pwq pointer stored in
	 * data must therefore be aligned to at least
	 * 1 << WORK_STRUCT_FLAG_BITS (see WORK_STRUCT_WQ_DATA_MASK).
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * While off queue, the upper bits encode the pool the item was
	 * last associated with; the all-ones value means "no pool".
	 * The pool ID width is capped at 31 bits — NOTE(review): confirm
	 * the exact rationale for the 31-bit cap against workqueue.c.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience masks over the layout above */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN	= 24,
};
99
struct work_struct {
	atomic_long_t data;		/* WORK_STRUCT_* flags + pwq/pool info */
	struct list_head entry;		/* list node while queued */
	work_func_t func;		/* callback executed for this item */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;	/* lockdep tracking for this item */
#endif
};
108
/* initial data values: no pool association; STATIC variant marks the
 * item as statically initialized for debugobjects */
#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
112
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;	/* set up with delayed_work_timer_fn */

	/* target workqueue and CPU the timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};
121
122
123
124
125
126
127
128
129
/*
 * Attributes of an unbound workqueue; see apply_workqueue_attrs().
 */
struct workqueue_attrs {
	int nice;		/* nice level of the workers */
	cpumask_var_t cpumask;	/* allowed CPUs */
	bool no_numa;		/* presumably disables NUMA affinity — confirm in workqueue.c */
};
135
136static inline struct delayed_work *to_delayed_work(struct work_struct *work)
137{
138 return container_of(work, struct delayed_work, work);
139}
140
/* wrapper used by execute_in_process_context() */
struct execute_work {
	struct work_struct work;
};
144
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting the key here is
 * required, otherwise it could be initialised to the copy of the
 * lockdep_map.
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

/* static initializer for a work_struct; pairs with DECLARE_WORK() */
#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}
163
/* static initializer for a delayed_work; @tflags are extra timer flags */
#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     0, (unsigned long)&(n),		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

/* define and statically initialize a work item */
#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

/* define and statically initialize a delayed work item */
#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

/* as DECLARE_DELAYED_WORK() but with TIMER_DEFERRABLE on the timer */
#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
179
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
/* non-zero iff @work was statically initialized (WORK_STRUCT_STATIC set) */
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
/* debugobjects disabled: all the hooks compile away */
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
194
195
196
197
198
199
200
201
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

/* runtime initializer for a work_struct */
#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

/* as INIT_WORK() but for on-stack items tracked by debugobjects */
#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)
228
/* runtime initializer for a delayed_work; @_tflags are extra timer flags */
#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
			      (unsigned long)(_work),			\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

/* on-stack variant; pairs with destroy_delayed_work_on_stack() */
#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       delayed_work_timer_fn,		\
				       (unsigned long)(_work),		\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

/* deferrable variants: pass TIMER_DEFERRABLE to the underlying timer */
#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
257
258
259
260
261
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is
 * currently pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
272
273
274
275
276
/*
 * Workqueue flags and constants.
 * NOTE(review): bit 0 is unused here; the flags start at 1 << 1.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	WQ_POWER_EFFICIENT	= 1 << 7, /* prefer power-efficient placement */

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */

	WQ_MAX_ACTIVE		= 512,	  /* default cap for in-flight work items */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* per-cpu scaling factor for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};
319
320
/* unbound wq's aren't per-cpu: scale the max_active cap with CPU count */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by the schedule[_delayed]_work[_on]()
 * wrappers below.  The others are per-capability variants; the names
 * mirror the WQ_* flag each one is presumably created with — confirm
 * against kernel/workqueue.c.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/* backend for alloc_workqueue(); use the macro, not this directly */
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  With lockdep
 * enabled, a static lock class keyed on the call site is attached and
 * named after the stringified arguments.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = #fmt#args;					\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif
396
397
398
399
400
401
402
403
404
405
406
407
408
409
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (combined with WQ_UNBOUND | __WQ_ORDERED)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue: unbound, ordered, max_active of 1.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

/* older-style constructors, expressed in terms of alloc_workqueue() */
#define create_workqueue(name)						\
	alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
			1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
420
extern void destroy_workqueue(struct workqueue_struct *wq);

/* allocation/lifetime of unbound-workqueue attributes */
struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);

/* queueing with an explicit CPU; these back the inline wrappers below */
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

/* flushing/draining whole workqueues */
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

/* flushing/cancelling individual work items */
extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

/* tuning and introspection */
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
457
458
459
460
461
462
463
464
465
466
467
468static inline bool queue_work(struct workqueue_struct *wq,
469 struct work_struct *work)
470{
471 return queue_work_on(WORK_CPU_UNBOUND, wq, work);
472}
473
474
475
476
477
478
479
480
481
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() with WORK_CPU_UNBOUND.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
488
489
490
491
492
493
494
495
496
/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() with WORK_CPU_UNBOUND.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
503
504
505
506
507
508
509
510
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu, using the system workqueue.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}
515
516
517
518
519
520
521
522
523
524
525
526
527static inline bool schedule_work(struct work_struct *work)
528{
529 return queue_work(system_wq, work);
530}
531
532
533
534
535
536
537
538
539
540
/**
 * schedule_delayed_work_on - queue work in the system workqueue on a
 * specific CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
546
547
548
549
550
551
552
553
554
/**
 * schedule_delayed_work - put work task in the system workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
560
561
562
563
564static inline bool keventd_up(void)
565{
566 return system_wq != NULL;
567}
568
#ifndef CONFIG_SMP
/* UP: there is only one CPU, just call @fn synchronously */
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
/* SMP implementation is provided out of line */
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif
577
#ifdef CONFIG_FREEZER
/* suspend/resume hooks for freezable (WQ_FREEZABLE) workqueues */
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif
583
#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else
/* no sysfs: registration is a successful no-op */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif
590
591#endif
592