linux/drivers/md/bcache/closure.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

#include "closure.h"

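/*
 * A note on ->remaining (see the definitions in closure.h): the low bits
 * (CLOSURE_REMAINING_MASK) hold the refcount proper, while the high bits
 * carry state flags such as CLOSURE_RUNNING, CLOSURE_WAITING and
 * CLOSURE_DESTRUCTOR. Each flag bit is followed by an unused guard bit
 * (CLOSURE_GUARD_MASK), so a count over- or underflow trips the BUG_ON()s
 * in closure_put_after_sub() below instead of silently corrupting a
 * neighbouring flag.
 */
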
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);

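/*
 * Illustrative note: __closure_wake_up() below is the in-file example of
 * this -- it drops the waitlist's ref and clears CLOSURE_WAITING in a
 * single atomic operation:
 *
 *	closure_sub(cl, CLOSURE_WAITING + 1);
 */
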
/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);

/*
 * __closure_wake_up - wake up all closures on a wait list, without a memory
 * barrier (the closure_wake_up() wrapper in closure.h supplies the barrier)
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 *
 * Return: true if @cl was added to @waitlist, false if it was already on
 * a waitlist.
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);

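/*
 * A minimal usage sketch (hypothetical caller, not part of this file),
 * following the pattern documented in closure.h: park a closure on a
 * waitlist, then sleep until the waker releases the waitlist's ref:
 *
 *	closure_wait(&waitlist, cl);
 *	...			// recheck the wait condition here, or a
 *	...			// wakeup between test and wait can be lost
 *	closure_sync(cl);	// sleep until closure_wake_up(&waitlist)
 */
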
struct closure_syncer {
	struct task_struct	*task;
	int			done;
};

static void closure_sync_fn(struct closure *cl)
{
	struct closure_syncer *s = cl->s;
	struct task_struct *p;

	/*
	 * Read ->task before setting ->done: once ->done is visible the
	 * waiter may return and free the on-stack syncer, and RCU keeps
	 * the task valid for the wake_up_process() call.
	 */
	rcu_read_lock();
	p = READ_ONCE(s->task);
	s->done = 1;
	wake_up_process(p);
	rcu_read_unlock();
}

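/*
 * __closure_sync() makes the calling task the closure's continuation:
 * continue_at() points cl->fn at closure_sync_fn() (with a NULL wq, so
 * closure_queue() will call it directly) and drops this thread's ref.
 * The task then sleeps in TASK_UNINTERRUPTIBLE until the final
 * closure_put() requeues the closure and closure_sync_fn() wakes it.
 */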
void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(__closure_sync);

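/*
 * Sketch of the on-stack pattern this enables (hypothetical caller; the
 * bio and its completion handler are illustrative only):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	closure_get(&cl);	// ref handed to the in-flight request
 *	submit_bio(bio);	// completion handler does closure_put(&cl)
 *	closure_sync(&cl);	// wait here until that ref is dropped
 */
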
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

static struct dentry *closure_debug;

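/*
 * One record per live closure: its address, the ip it was last set from
 * and its current fn, the parent pointer and the remaining count, then
 * "Q" if its work item is pending and "R" if CLOSURE_RUNNING is set,
 * plus a " W <ip>" line naming the closure_wait() caller if it is
 * currently on a waitlist.
 */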
static int debug_seq_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING ? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pS\n",
				   (void *) cl->waiting_on);

		seq_puts(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

static int debug_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_seq_show, NULL);
}

static const struct file_operations debug_ops = {
	.owner		= THIS_MODULE,
	.open		= debug_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

void __init closure_debug_init(void)
{
	if (!IS_ERR_OR_NULL(bcache_debug))
		/*
		 * No need to check the return value of
		 * debugfs_create_file(): debugfs errors are
		 * deliberately ignored here.
		 */
		closure_debug = debugfs_create_file(
			"closures", 0400, bcache_debug, NULL, &debug_ops);
}
#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");