linux/fs/io-wq.h
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct io_wq;

/* flags stored in io_wq_work.flags */
enum {
        IO_WQ_WORK_CANCEL       = 1,
        IO_WQ_WORK_HASHED       = 2,
        IO_WQ_WORK_UNBOUND      = 4,
        IO_WQ_WORK_CONCURRENT   = 16,

        IO_WQ_HASH_SHIFT        = 24,   /* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
        IO_WQ_CANCEL_OK,        /* cancelled before started */
        IO_WQ_CANCEL_RUNNING,   /* found, running, cancellation attempted */
        IO_WQ_CANCEL_NOTFOUND,  /* work not found */
};

/* node of an intrusive, singly linked work list */
struct io_wq_work_node {
        struct io_wq_work_node *next;
};

struct io_wq_work_list {
        struct io_wq_work_node *first;
        struct io_wq_work_node *last;
};

/* insert @node directly after @pos, updating the tail if @pos was last */
static inline void wq_list_add_after(struct io_wq_work_node *node,
                                     struct io_wq_work_node *pos,
                                     struct io_wq_work_list *list)
{
        struct io_wq_work_node *next = pos->next;

        pos->next = node;
        node->next = next;
        if (!next)
                list->last = node;
}

/* append @node at the tail of @list */
static inline void wq_list_add_tail(struct io_wq_work_node *node,
                                    struct io_wq_work_list *list)
{
        if (!list->first) {
                list->last = node;
                WRITE_ONCE(list->first, node);
        } else {
                list->last->next = node;
                list->last = node;
        }
        node->next = NULL;
}

/*
 * Unlink the chain from the node after @prev up to and including @last;
 * @prev == NULL means the chain starts at the head of the list.
 */
static inline void wq_list_cut(struct io_wq_work_list *list,
                               struct io_wq_work_node *last,
                               struct io_wq_work_node *prev)
{
        /* first in the list, if prev==NULL */
        if (!prev)
                WRITE_ONCE(list->first, last->next);
        else
                prev->next = last->next;

        if (last == list->last)
                list->last = prev;
        last->next = NULL;
}

/* unlink a single @node; @prev is its predecessor, or NULL for the head */
static inline void wq_list_del(struct io_wq_work_list *list,
                               struct io_wq_work_node *node,
                               struct io_wq_work_node *prev)
{
        wq_list_cut(list, node, prev);
}

/* iterate @head; @prv tracks the predecessor needed by wq_list_del() */
#define wq_list_for_each(pos, prv, head)                        \
        for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)     (READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)      do {                            \
        (list)->first = NULL;                                   \
        (list)->last = NULL;                                    \
} while (0)

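/*
 * Illustrative sketch only (not part of the upstream header): how the
 * list helpers above combine. "demo_item" and "wq_list_demo" are
 * hypothetical names; the #if 0 keeps the sketch out of the build.
 */
#if 0
struct demo_item {
        int id;
        struct io_wq_work_node node;    /* intrusive link */
};

static void wq_list_demo(struct demo_item *a, struct demo_item *b)
{
        struct io_wq_work_list list;
        struct io_wq_work_node *pos, *prv;

        INIT_WQ_LIST(&list);
        wq_list_add_tail(&a->node, &list);
        wq_list_add_tail(&b->node, &list);

        /* walk the list; delete the entry matching a->id */
        wq_list_for_each(pos, prv, &list) {
                struct demo_item *item = container_of(pos, struct demo_item, node);

                if (item->id == a->id) {
                        wq_list_del(&list, pos, prv);
                        break;
                }
        }
}
#endif
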
struct io_wq_work {
        struct io_wq_work_node list;
        unsigned flags;                 /* IO_WQ_WORK_* flags */
};

/* return the work item linked after @work, or NULL at the end of the list */
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
        if (!work->list.next)
                return NULL;

        return container_of(work->list.next, struct io_wq_work, list);
}

typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

struct io_wq_hash {
        refcount_t refs;
        unsigned long map;              /* one bit per in-flight hash key */
        struct wait_queue_head wait;    /* woken when a map bit clears */
};

/* drop a reference; the hash is freed when the last reference is put */
static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
        if (refcount_dec_and_test(&hash->refs))
                kfree(hash);
}

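/*
 * Illustrative sketch only (not part of the upstream header): allocating
 * and initialising a hash before handing it to io_wq_create(). The
 * "demo_alloc_hash" name is hypothetical.
 */
#if 0
static struct io_wq_hash *demo_alloc_hash(void)
{
        struct io_wq_hash *hash = kzalloc(sizeof(*hash), GFP_KERNEL);

        if (!hash)
                return NULL;
        refcount_set(&hash->refs, 1);   /* dropped via io_wq_put_hash() */
        init_waitqueue_head(&hash->wait);
        return hash;
}
#endif
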
struct io_wq_data {
        struct io_wq_hash *hash;
        struct task_struct *task;       /* task the workers are created for */
        io_wq_work_fn *do_work;         /* executes one work item */
        free_work_fn *free_work;        /* frees a completed work item;
                                           may return linked follow-up work */
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);

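/*
 * Illustrative sketch only (not part of the upstream header): minimal
 * setup and worker-pool creation. "demo_do_work", "demo_free_work",
 * "demo_start" and the bounded-worker count of 4 are hypothetical.
 */
#if 0
static void demo_do_work(struct io_wq_work *work)
{
        /* execute the request that embeds @work */
}

static struct io_wq_work *demo_free_work(struct io_wq_work *work)
{
        /* release @work; may return linked follow-up work, or NULL */
        return NULL;
}

static struct io_wq *demo_start(struct io_wq_hash *hash)
{
        struct io_wq_data data = {
                .hash           = hash,
                .task           = current,
                .do_work        = demo_do_work,
                .free_work      = demo_free_work,
        };

        /* returns an ERR_PTR() on failure */
        return io_wq_create(4, &data);
}
#endif
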
/* true if @work is serialised against other work hashed to the same key */
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
        return work->flags & IO_WQ_WORK_HASHED;
}

/* return true if @work matches the cancellation criteria in @data */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
                                        void *data, bool cancel_all);

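/*
 * Illustrative sketch only (not part of the upstream header): cancelling
 * a specific queued or running work item. "demo_cancel_match" and
 * "demo_cancel" are hypothetical.
 */
#if 0
static bool demo_cancel_match(struct io_wq_work *work, void *data)
{
        /* here @data identifies the work item itself */
        return work == data;
}

static enum io_wq_cancel demo_cancel(struct io_wq *wq, struct io_wq_work *work)
{
        /* cancel_all=false: stop after the first match */
        return io_wq_cancel_cb(wq, demo_cancel_match, work, false);
}
#endif
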
#if defined(CONFIG_IO_WQ)
/* scheduler hooks, called when an io-wq worker blocks or resumes */
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

/* true if the calling task is an io-wq worker thread */
static inline bool io_wq_current_is_worker(void)
{
        return in_task() && (current->flags & PF_IO_WORKER) &&
                current->pf_io_worker;
}
#endif