/* linux/drivers/md/bcache/closure.c */
   1/*
   2 * Asynchronous refcounty things
   3 *
   4 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
   5 * Copyright 2012 Google, Inc.
   6 */
   7
   8#include <linux/debugfs.h>
   9#include <linux/module.h>
  10#include <linux/seq_file.h>
  11
  12#include "closure.h"
  13
  14void closure_queue(struct closure *cl)
  15{
  16        struct workqueue_struct *wq = cl->wq;
  17        if (wq) {
  18                INIT_WORK(&cl->work, cl->work.func);
  19                BUG_ON(!queue_work(wq, &cl->work));
  20        } else
  21                cl->fn(cl);
  22}
  23EXPORT_SYMBOL_GPL(closure_queue);
  24
/*
 * Expand to a switch case that returns the address of @field inside the
 * struct @type containing @cl (keyed on cl->type == TYPE_<type>).
 */
#define CL_FIELD(type, field)					\
	case TYPE_ ## type:					\
	return &container_of(cl, struct type, cl)->field
  28
/* Return the closure's embedded wait list, or NULL if its type has none. */
static struct closure_waitlist *closure_waitlist(struct closure *cl)
{
	switch (cl->type) {
		CL_FIELD(closure_with_waitlist, wait);
		CL_FIELD(closure_with_waitlist_and_timer, wait);
	default:
		return NULL;
	}
}
  38
/* Return the closure's embedded timer, or NULL if its type has none. */
static struct timer_list *closure_timer(struct closure *cl)
{
	switch (cl->type) {
		CL_FIELD(closure_with_timer, timer);
		CL_FIELD(closure_with_waitlist_and_timer, timer);
	default:
		return NULL;
	}
}
  48
/*
 * Act on the post-subtraction value of cl->remaining: deliver a wakeup if
 * needed and, when the refcount portion reaches zero, either requeue the
 * closure (it still has a continuation in cl->fn) or tear it down.
 *
 * @flags: value of cl->remaining after the atomic sub; the low bits
 * (CLOSURE_REMAINING_MASK) are the refcount, the rest are state flags.
 */
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	/* Guard bits trip if a flag field over/underflowed into them */
	BUG_ON(flags & CLOSURE_GUARD_MASK);
	/* At zero refs only DESTRUCTOR/BLOCKING may remain set */
	BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));

	/* Must deliver precisely one wakeup */
	if (r == 1 && (flags & CLOSURE_SLEEPING))
		wake_up_process(cl->task);

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/* CLOSURE_BLOCKING might be set - clear it */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			/* Final teardown: snapshot fields before freeing */
			struct closure *parent = cl->parent;
			struct closure_waitlist *wait = closure_waitlist(cl);
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			/*
			 * Order the teardown above before publishing the
			 * closure as free (remaining == -1), which is what
			 * closure_trylock() cmpxchg()es against.
			 */
			smp_mb();
			atomic_set(&cl->remaining, -1);

			if (wait)
				closure_wake_up(wait);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}
  87
/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL_GPL(closure_sub);
  94
/* Drop one reference on @cl; may trigger requeue or teardown at zero. */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL_GPL(closure_put);
 100
/*
 * Record where the closure went to sleep (a return address) for debugfs;
 * no-op unless CONFIG_BCACHE_CLOSURES_DEBUG is enabled.
 */
static void set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->waiting_on = f;
#endif
}
 107
 108void __closure_wake_up(struct closure_waitlist *wait_list)
 109{
 110        struct llist_node *list;
 111        struct closure *cl;
 112        struct llist_node *reverse = NULL;
 113
 114        list = llist_del_all(&wait_list->list);
 115
 116        /* We first reverse the list to preserve FIFO ordering and fairness */
 117
 118        while (list) {
 119                struct llist_node *t = list;
 120                list = llist_next(list);
 121
 122                t->next = reverse;
 123                reverse = t;
 124        }
 125
 126        /* Then do the wakeups */
 127
 128        while (reverse) {
 129                cl = container_of(reverse, struct closure, list);
 130                reverse = llist_next(reverse);
 131
 132                set_waiting(cl, 0);
 133                closure_sub(cl, CLOSURE_WAITING + 1);
 134        }
 135}
 136EXPORT_SYMBOL_GPL(__closure_wake_up);
 137
/*
 * Park @cl on @list unless it is already waiting somewhere.
 *
 * Sets CLOSURE_WAITING and takes a ref in a single atomic add; both are
 * dropped by __closure_wake_up().  Returns false if the closure was
 * already on a wait list.  Statement order matters: the flag/ref must be
 * taken before the closure becomes reachable via the llist.
 */
bool closure_wait(struct closure_waitlist *list, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &list->list);

	return true;
}
EXPORT_SYMBOL_GPL(closure_wait);
 150
/**
 * closure_sync() - sleep until a closure has nothing left to wait on
 * @cl:		closure to wait on
 *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
 */
void closure_sync(struct closure *cl)
{
	while (1) {
		/* Mark ourselves sleeping before checking, so a concurrent
		 * put cannot miss the wakeup. */
		__closure_start_sleep(cl);
		closure_set_ret_ip(cl);

		if ((atomic_read(&cl->remaining) &
		     CLOSURE_REMAINING_MASK) == 1)
			break;

		schedule();
	}

	__closure_end_sleep(cl);
}
EXPORT_SYMBOL_GPL(closure_sync);
 173
/**
 * closure_trylock() - try to acquire the closure, without waiting
 * @cl:		closure to lock
 * @parent:	parent to take a ref on while @cl is held (may be NULL)
 *
 * Returns true if the closure was successfully locked.
 */
bool closure_trylock(struct closure *cl, struct closure *parent)
{
	/* A free (unlocked) closure has remaining == -1 */
	if (atomic_cmpxchg(&cl->remaining, -1,
			   CLOSURE_REMAINING_INITIALIZER) != -1)
		return false;

	closure_set_ret_ip(cl);

	/* Order the cmpxchg above before reinitializing fields below */
	smp_mb();
	cl->parent = parent;
	if (parent)
		closure_get(parent);

	closure_debug_create(cl);
	return true;
}
EXPORT_SYMBOL_GPL(closure_trylock);
 197
/*
 * Acquire @cl, sleeping on @wait_list (via an on-stack closure) until the
 * trylock succeeds, i.e. until cl->remaining returns to -1.
 */
void __closure_lock(struct closure *cl, struct closure *parent,
		    struct closure_waitlist *wait_list)
{
	struct closure wait;
	closure_init_stack(&wait);

	while (1) {
		if (closure_trylock(cl, parent))
			return;

		closure_wait_event_sync(wait_list, &wait,
					atomic_read(&cl->remaining) == -1);
	}
}
EXPORT_SYMBOL_GPL(__closure_lock);
 213
/* Timer callback: drop CLOSURE_TIMER and its ref when the delay expires. */
static void closure_delay_timer_fn(unsigned long data)
{
	struct closure *cl = (struct closure *) data;
	closure_sub(cl, CLOSURE_TIMER + 1);
}
 219
/* Initialize the closure's embedded timer to fire closure_delay_timer_fn(). */
void do_closure_timer_init(struct closure *cl)
{
	struct timer_list *timer = closure_timer(cl);

	init_timer(timer);
	timer->data	= (unsigned long) cl;
	timer->function = closure_delay_timer_fn;
}
EXPORT_SYMBOL_GPL(do_closure_timer_init);
 229
/*
 * Arm @timer to fire @delay jiffies from now, taking CLOSURE_TIMER plus a
 * ref on @cl (both dropped by the timer callback or a flush).  Returns
 * false if a delay is already pending on this closure.
 */
bool __closure_delay(struct closure *cl, unsigned long delay,
		     struct timer_list *timer)
{
	if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
		return false;

	BUG_ON(timer_pending(timer));

	timer->expires	= jiffies + delay;

	/* Take the flag+ref before arming so the callback's sub balances */
	atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
	add_timer(timer);
	return true;
}
EXPORT_SYMBOL_GPL(__closure_delay);
 245
/*
 * Cancel a pending delay; only drops the flag+ref if del_timer() actually
 * deactivated the timer (otherwise the callback will, or already has).
 */
void __closure_flush(struct closure *cl, struct timer_list *timer)
{
	if (del_timer(timer))
		closure_sub(cl, CLOSURE_TIMER + 1);
}
EXPORT_SYMBOL_GPL(__closure_flush);
 252
/*
 * Like __closure_flush(), but waits for a running timer callback to finish
 * on other CPUs (del_timer_sync); must not be called from the callback.
 */
void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
{
	if (del_timer_sync(timer))
		closure_sub(cl, CLOSURE_TIMER + 1);
}
EXPORT_SYMBOL_GPL(__closure_flush_sync);
 259
 260#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
 261
 262static LIST_HEAD(closure_list);
 263static DEFINE_SPINLOCK(closure_list_lock);
 264
 265void closure_debug_create(struct closure *cl)
 266{
 267        unsigned long flags;
 268
 269        BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
 270        cl->magic = CLOSURE_MAGIC_ALIVE;
 271
 272        spin_lock_irqsave(&closure_list_lock, flags);
 273        list_add(&cl->all, &closure_list);
 274        spin_unlock_irqrestore(&closure_list_lock, flags);
 275}
 276EXPORT_SYMBOL_GPL(closure_debug_create);
 277
 278void closure_debug_destroy(struct closure *cl)
 279{
 280        unsigned long flags;
 281
 282        BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
 283        cl->magic = CLOSURE_MAGIC_DEAD;
 284
 285        spin_lock_irqsave(&closure_list_lock, flags);
 286        list_del(&cl->all);
 287        spin_unlock_irqrestore(&closure_list_lock, flags);
 288}
 289EXPORT_SYMBOL_GPL(closure_debug_destroy);
 290
 291static struct dentry *debug;
 292
/* Peek at a work_struct's atomic data word (to test WORK_STRUCT_PENDING). */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
 294
/*
 * seq_file show handler: dump every live closure with its ip, fn, parent,
 * refcount and decoded state flags; waiting closures also get the address
 * they blocked at.
 */
static int debug_seq_show(struct seq_file *f, void *data)
{
	struct closure *cl;
	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pF -> %pf p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		/* Q = queued on a workqueue, then one letter per flag */
		seq_printf(f, "%s%s%s%s%s%s\n",
			   test_bit(WORK_STRUCT_PENDING,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING	? "R" : "",
			   r & CLOSURE_BLOCKING ? "B" : "",
			   r & CLOSURE_STACK	? "S" : "",
			   r & CLOSURE_SLEEPING ? "Sl" : "",
			   r & CLOSURE_TIMER	? "T" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pF\n",
				   (void *) cl->waiting_on);

		seq_printf(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}
 326
/* debugfs open: single_open() so debug_seq_show() emits the whole list. */
static int debug_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_seq_show, NULL);
}
 331
/* File operations for the debugfs "closures" file. */
static const struct file_operations debug_ops = {
	.owner		= THIS_MODULE,
	.open		= debug_seq_open,
	.read		= seq_read,
	.release	= single_release
};
 338
/* Create the read-only debugfs "closures" file at debugfs root. */
void __init closure_debug_init(void)
{
	debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
}
 343
 344#endif
 345
 346MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
 347MODULE_LICENSE("GPL");
 348