linux/include/linux/workqueue.h
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

struct work_struct {
	atomic_long_t data;
#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

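/*
 * Example (illustrative sketch; my_poll_fn and the one-second period are
 * hypothetical): a work handler only receives a struct work_struct pointer,
 * so a self-rearming delayed work item uses to_delayed_work() to recover its
 * containing struct delayed_work.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		// poll the hardware here ...
 *		schedule_delayed_work(dwork, HZ);	// re-arm in one second
 *	}
 */
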
struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

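/*
 * Example (illustrative; my_handler is hypothetical): declare a work item
 * statically, fully initialized at compile time, and queue it on the shared
 * kernel-wide queue with schedule_work() (declared below).
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	// from e.g. an interrupt handler:
 *	schedule_work(&my_work);
 */
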
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define INIT_WORK(_work, _func)						\
	do {								\
		static struct lock_class_key __key;			\
									\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define INIT_WORK(_work, _func)						\
	do {								\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

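/*
 * Example (illustrative; struct my_dev, my_reset_fn and my_dev_probe are
 * hypothetical): a work item embedded in a dynamically allocated structure
 * must be initialized at runtime with INIT_WORK(); the handler can then use
 * container_of() to get back to the enclosing structure.
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *		// ... device state ...
 *	};
 *
 *	static void my_reset_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  reset_work);
 *		// reset the device here ...
 *	}
 *
 *	static int my_dev_probe(struct my_dev *dev)
 *	{
 *		INIT_WORK(&dev->reset_work, my_reset_fn);
 *		return 0;
 *	}
 */
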
#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))

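/*
 * Example (illustrative; my_dwork is hypothetical): skip re-queueing a
 * delayed work item that is already pending. The check is advisory:
 * queue_delayed_work() and schedule_delayed_work() already return 0 if the
 * item was pending.
 *
 *	if (!delayed_work_pending(&my_dwork))
 *		schedule_delayed_work(&my_dwork, HZ);
 */
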
extern struct workqueue_struct *
__create_workqueue_key(const char *name, int singlethread,
		       int freezeable, int rt, struct lock_class_key *key,
		       const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, singlethread, freezeable, rt)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__create_workqueue_key((name), (singlethread),		\
			       (freezeable), (rt), &__key,	\
			       __lock_name);			\
})
#else
#define __create_workqueue(name, singlethread, freezeable, rt)	\
	__create_workqueue_key((name), (singlethread), (freezeable), (rt), \
			       NULL, NULL)
#endif

#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)

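/*
 * Example (illustrative; my_wq and my_work are hypothetical): create a
 * dedicated single-threaded queue at init time, feed it work items, and
 * destroy it on teardown. destroy_workqueue() flushes the queue before
 * freeing it.
 *
 *	static struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);
 */
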
extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int current_is_keventd(void);
extern int keventd_up(void);

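/*
 * Example (illustrative; my_dwork and my_handler are hypothetical): the
 * schedule_*() helpers put work on the shared kernel-wide queue, so no
 * workqueue needs to be created. Here a delayed work item is run roughly
 * 5 seconds from now; the delay is given in jiffies.
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 *	schedule_delayed_work(&my_dwork, 5 * HZ);
 */
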
extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);

extern int cancel_work_sync(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

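/*
 * Example (illustrative; dev->poll_work is hypothetical): on teardown of a
 * self-rearming delayed work item, cancel_delayed_work() alone is not enough,
 * since the handler may already be running or about to re-arm. Use
 * cancel_delayed_work_sync() (declared below) to cancel, wait for a running
 * handler to finish, and keep it from re-arming.
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 */
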
/*
 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

extern int cancel_delayed_work_sync(struct delayed_work *work);

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
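
/*
 * Example (illustrative; my_read_fn and arg are hypothetical): run a
 * function on a specific CPU in process context and collect its return
 * value. On !CONFIG_SMP builds this degenerates to a direct call.
 *
 *	long ret = work_on_cpu(2, my_read_fn, &arg);
 */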
#endif	/* _LINUX_WORKQUEUE_H */