linux/drivers/md/bcache/closure.c
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include "closure.h"

static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	/* Must deliver precisely one wakeup */
	if (r == 1 && (flags & CLOSURE_SLEEPING))
		wake_up_process(cl->task);

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);
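
/*
 * Usage note: closure_sub() lets a caller clear state flags and drop the
 * matching reference in a single atomic operation.  __closure_wake_up()
 * below does exactly that when it takes a closure off a wait list:
 *
 *	closure_set_waiting(cl, 0);
 *	closure_sub(cl, CLOSURE_WAITING + 1);
 *
 * which clears CLOSURE_WAITING and releases the ref that closure_wait()
 * took, in one atomic_sub_return().
 */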

/**
 * closure_put - decrement a closure's refcount
 * @cl: closure to drop a reference on
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
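
/*
 * Usage sketch (hypothetical caller, not part of this file): a ref is
 * typically taken with closure_get() before kicking off an async operation
 * and dropped again from its completion path:
 *
 *	closure_get(&op->cl);
 *	start_async_work(op);
 *
 *	static void op_complete(struct my_op *op)
 *	{
 *		...
 *		closure_put(&op->cl);
 *	}
 *
 * When the last ref goes away, closure_put_after_sub() either requeues the
 * closure (if cl->fn is set) or runs the destructor and puts the parent.
 * struct my_op, start_async_work() and op_complete() are made-up names.
 */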

/**
 * __closure_wake_up - wake up all closures on a wait list, without memory barrier
 * @wait_list: list of closures to wake up
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */

	while (list) {
		struct llist_node *t = list;
		list = llist_next(list);

		t->next = reverse;
		reverse = t;
	}

	/* Then do the wakeups */

	while (reverse) {
		cl = container_of(reverse, struct closure, list);
		reverse = llist_next(reverse);

		closure_set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);
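
/*
 * Usage sketch (hypothetical caller, not part of this file): a producer
 * normally makes the waiters' condition true and then wakes the list via
 * the closure_wake_up() wrapper in closure.h, which issues the memory
 * barrier that this function deliberately omits:
 *
 *	dev->resource_available = true;
 *	closure_wake_up(&dev->wait);
 *
 * dev and its fields are made-up names for illustration.
 */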

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure to add to @waitlist
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);
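
/*
 * Usage sketch (hypothetical caller, not part of this file): a blocking
 * waiter checks its condition and calls closure_wait() under the lock that
 * protects that condition (as e.g. the bcache journal code does), then
 * drops the lock and sleeps in closure_sync():
 *
 *	spin_lock(&dev->lock);
 *	while (!resource_available(dev)) {
 *		closure_wait(&dev->wait, &cl);
 *		spin_unlock(&dev->lock);
 *
 *		closure_sync(&cl);
 *		spin_lock(&dev->lock);
 *	}
 *	spin_unlock(&dev->lock);
 *
 * An asynchronous waiter would instead park itself with closure_wait() and
 * return with continue_at().  dev, dev->lock, dev->wait and
 * resource_available() are made-up names for illustration.
 */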

/**
 * closure_sync - sleep until a closure has nothing left to wait on
 * @cl: closure to sync
 *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
 */
void closure_sync(struct closure *cl)
{
	while (1) {
		__closure_start_sleep(cl);
		closure_set_ret_ip(cl);

		if ((atomic_read(&cl->remaining) &
		     CLOSURE_REMAINING_MASK) == 1)
			break;

		schedule();
	}

	__closure_end_sleep(cl);
}
EXPORT_SYMBOL(closure_sync);
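
/*
 * Usage sketch (hypothetical caller, not part of this file): a stack
 * closure can be used to wait for a batch of async operations:
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *
 *	for (i = 0; i < n; i++) {
 *		closure_get(&cl);
 *		submit_one(dev, i, &cl);
 *	}
 *
 *	closure_sync(&cl);
 *
 * Each completion path drops its ref with closure_put(&cl); closure_sync()
 * returns once only the caller's ref remains.  closure_init_stack() and
 * closure_get() come from closure.h; submit_one() and dev are made-up names.
 */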

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

static struct dentry *debug;

static int debug_seq_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pF -> %pf p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING	? "R" : "",
			   r & CLOSURE_STACK	? "S" : "",
			   r & CLOSURE_SLEEPING	? "Sl" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pF\n",
				   (void *) cl->waiting_on);

		seq_printf(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

static int debug_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_seq_show, NULL);
}

static const struct file_operations debug_ops = {
	.owner		= THIS_MODULE,
	.open		= debug_seq_open,
	.read		= seq_read,
	.release	= single_release
};

void __init closure_debug_init(void)
{
	debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
}
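
/*
 * With CONFIG_BCACHE_CLOSURES_DEBUG enabled, the list of live closures can
 * be inspected at runtime (assuming debugfs is mounted at the usual
 * /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/closures
 *
 * Each entry prints the closure, its last ip, fn, parent and remaining
 * count, the flags Q (work pending), R (running), S (on stack) and
 * Sl (sleeping), and, when CLOSURE_WAITING is set, W plus the address
 * recorded by closure_wait().
 */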

#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");