linux/drivers/md/dm-bio-prison-v2.c
/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

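/*
 * A prison holds one cell per active key.  @lock protects the rb-tree of
 * cells, @wq is where quiesce continuations get queued, and @cell_pool
 * backs cell allocation so it can make forward progress under memory
 * pressure.
 */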
struct dm_bio_prison_v2 {
        struct workqueue_struct *wq;

        spinlock_t lock;
        struct rb_root cells;
        mempool_t cell_pool;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * MIN_CELLS sizes the mempool reserve, i.e. the number of cells guaranteed
 * to be available for use _concurrently_.  Don't confuse it with the
 * number of distinct keys.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
        struct dm_bio_prison_v2 *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
        int ret;

        if (!prison)
                return NULL;

        prison->wq = wq;
        spin_lock_init(&prison->lock);

        ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
        if (ret) {
                kfree(prison);
                return NULL;
        }

        prison->cells = RB_ROOT;

        return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);

void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
        mempool_exit(&prison->cell_pool);
        kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);

struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
        return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);

void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
                                struct dm_bio_prison_cell_v2 *cell)
{
        mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);

static void __setup_new_cell(struct dm_cell_key_v2 *key,
                             struct dm_bio_prison_cell_v2 *cell)
{
        memset(cell, 0, sizeof(*cell));
        memcpy(&cell->key, key, sizeof(cell->key));
        bio_list_init(&cell->bios);
}

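/*
 * Total order on keys: compare by virtual, then dev.  Block ranges that
 * overlap compare as equal, so the rb-tree maps any overlapping
 * [block_begin, block_end) onto a single cell.
 */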
static int cmp_keys(struct dm_cell_key_v2 *lhs,
                    struct dm_cell_key_v2 *rhs)
{
        if (lhs->virtual < rhs->virtual)
                return -1;

        if (lhs->virtual > rhs->virtual)
                return 1;

        if (lhs->dev < rhs->dev)
                return -1;

        if (lhs->dev > rhs->dev)
                return 1;

        if (lhs->block_end <= rhs->block_begin)
                return -1;

        if (lhs->block_begin >= rhs->block_end)
                return 1;

        return 0;
}

/*
 * Returns true if a cell with an overlapping key was found; otherwise
 * inserts @cell_prealloc as a new node.  Either way *result points at the
 * cell for @key on return.
 */
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
                             struct dm_cell_key_v2 *key,
                             struct dm_bio_prison_cell_v2 *cell_prealloc,
                             struct dm_bio_prison_cell_v2 **result)
{
        int r;
        struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

        while (*new) {
                struct dm_bio_prison_cell_v2 *cell =
                        rb_entry(*new, struct dm_bio_prison_cell_v2, node);

                r = cmp_keys(key, &cell->key);

                parent = *new;
                if (r < 0)
                        new = &((*new)->rb_left);

                else if (r > 0)
                        new = &((*new)->rb_right);

                else {
                        *result = cell;
                        return true;
                }
        }

        __setup_new_cell(key, cell_prealloc);
        *result = cell_prealloc;
        rb_link_node(&cell_prealloc->node, parent, new);
        rb_insert_color(&cell_prealloc->node, &prison->cells);

        return false;
}

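/*
 * Try to take a shared lock for @inmate.  Returns true if granted (the
 * cell's shared_count is bumped).  If the cell is exclusively locked at a
 * level >= @lock_level, the bio is parked on the cell's bio list and false
 * is returned.
 */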
static bool __get(struct dm_bio_prison_v2 *prison,
                  struct dm_cell_key_v2 *key,
                  unsigned lock_level,
                  struct bio *inmate,
                  struct dm_bio_prison_cell_v2 *cell_prealloc,
                  struct dm_bio_prison_cell_v2 **cell)
{
        if (__find_or_insert(prison, key, cell_prealloc, cell)) {
                if ((*cell)->exclusive_lock) {
                        if (lock_level <= (*cell)->exclusive_level) {
                                bio_list_add(&(*cell)->bios, inmate);
                                return false;
                        }
                }

                (*cell)->shared_count++;

        } else
                (*cell)->shared_count = 1;

        return true;
}

bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
                    struct dm_cell_key_v2 *key,
                    unsigned lock_level,
                    struct bio *inmate,
                    struct dm_bio_prison_cell_v2 *cell_prealloc,
                    struct dm_bio_prison_cell_v2 **cell_result)
{
        int r;

        spin_lock_irq(&prison->lock);
        r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
        spin_unlock_irq(&prison->lock);

        return r;
}
EXPORT_SYMBOL_GPL(dm_cell_get_v2);

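/*
 * Drop a shared reference.  When the last shared holder goes away: if an
 * exclusive lock is waiting to quiesce, its continuation is queued;
 * otherwise the cell is removed from the tree and true is returned so the
 * caller can free it.
 */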
static bool __put(struct dm_bio_prison_v2 *prison,
                  struct dm_bio_prison_cell_v2 *cell)
{
        BUG_ON(!cell->shared_count);
        cell->shared_count--;

        /* FIXME: shared locks granted above the lock level could starve this */
        if (!cell->shared_count) {
                if (cell->exclusive_lock) {
                        if (cell->quiesce_continuation) {
                                queue_work(prison->wq, cell->quiesce_continuation);
                                cell->quiesce_continuation = NULL;
                        }
                } else {
                        rb_erase(&cell->node, &prison->cells);
                        return true;
                }
        }

        return false;
}

bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
                    struct dm_bio_prison_cell_v2 *cell)
{
        bool r;
        unsigned long flags;

        spin_lock_irqsave(&prison->lock, flags);
        r = __put(prison, cell);
        spin_unlock_irqrestore(&prison->lock, flags);

        return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);
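
/*
 * Illustrative sketch only (not part of the upstream file): how a client,
 * e.g. dm-cache, takes and drops a shared lock with the API above.  The
 * function name and lock level 0 are placeholders for this example.
 */
#if 0
static void shared_lock_example(struct dm_bio_prison_v2 *prison,
                                struct dm_cell_key_v2 *key, struct bio *bio)
{
        struct dm_bio_prison_cell_v2 *prealloc, *cell;

        /* Allocate outside the prison lock; GFP_KERNEL may sleep. */
        prealloc = dm_bio_prison_alloc_cell_v2(prison, GFP_KERNEL);

        if (!dm_cell_get_v2(prison, key, 0, bio, prealloc, &cell)) {
                /* Blocked by an exclusive lock; the bio is now parked in the cell. */
                dm_bio_prison_free_cell_v2(prison, prealloc);
                return;
        }

        /* An existing cell was found, so the prealloc wasn't consumed. */
        if (cell != prealloc)
                dm_bio_prison_free_cell_v2(prison, prealloc);

        /* ... submit the io and wait for it to complete ... */

        /* Drop the shared reference; true means we now own the cell. */
        if (dm_cell_put_v2(prison, cell))
                dm_bio_prison_free_cell_v2(prison, cell);
}
#endif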
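/*
 * Take the exclusive lock at @lock_level.  Returns -EBUSY if the cell is
 * already exclusively locked, 1 if there are shared holders that must be
 * quiesced before the caller may proceed, and 0 if the lock was granted
 * outright.
 */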
static int __lock(struct dm_bio_prison_v2 *prison,
                  struct dm_cell_key_v2 *key,
                  unsigned lock_level,
                  struct dm_bio_prison_cell_v2 *cell_prealloc,
                  struct dm_bio_prison_cell_v2 **cell_result)
{
        struct dm_bio_prison_cell_v2 *cell;

        if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
                if (cell->exclusive_lock)
                        return -EBUSY;

                cell->exclusive_lock = true;
                cell->exclusive_level = lock_level;
                *cell_result = cell;

                /*
                 * FIXME: we don't yet know what level these shared locks
                 * were taken at, so have to quiesce them all.
                 */
                return cell->shared_count > 0;

        } else {
                cell = cell_prealloc;
                cell->shared_count = 0;
                cell->exclusive_lock = true;
                cell->exclusive_level = lock_level;
                *cell_result = cell;
        }

        return 0;
}

int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
                    struct dm_cell_key_v2 *key,
                    unsigned lock_level,
                    struct dm_bio_prison_cell_v2 *cell_prealloc,
                    struct dm_bio_prison_cell_v2 **cell_result)
{
        int r;

        spin_lock_irq(&prison->lock);
        r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
        spin_unlock_irq(&prison->lock);

        return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);

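/*
 * Arrange for @continuation to run once every shared holder has dropped
 * its reference: queue it immediately if there are none, otherwise stash
 * it on the cell so the final __put() queues it.
 */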
static void __quiesce(struct dm_bio_prison_v2 *prison,
                      struct dm_bio_prison_cell_v2 *cell,
                      struct work_struct *continuation)
{
        if (!cell->shared_count)
                queue_work(prison->wq, continuation);
        else
                cell->quiesce_continuation = continuation;
}

void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
                        struct dm_bio_prison_cell_v2 *cell,
                        struct work_struct *continuation)
{
        spin_lock_irq(&prison->lock);
        __quiesce(prison, cell, continuation);
        spin_unlock_irq(&prison->lock);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);

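/*
 * Raise the level of an already held exclusive lock.  Returns -EINVAL if
 * the cell is not exclusively locked, 1 if shared holders must be quiesced
 * again, and 0 otherwise.
 */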
static int __promote(struct dm_bio_prison_v2 *prison,
                     struct dm_bio_prison_cell_v2 *cell,
                     unsigned new_lock_level)
{
        if (!cell->exclusive_lock)
                return -EINVAL;

        cell->exclusive_level = new_lock_level;
        return cell->shared_count > 0;
}

int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
                            struct dm_bio_prison_cell_v2 *cell,
                            unsigned new_lock_level)
{
        int r;

        spin_lock_irq(&prison->lock);
        r = __promote(prison, cell, new_lock_level);
        spin_unlock_irq(&prison->lock);

        return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);

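/*
 * Drop the exclusive lock, handing back any parked bios via @bios.
 * Returns true if the cell is now unused and has been removed from the
 * tree, in which case the caller should free it.
 */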
static bool __unlock(struct dm_bio_prison_v2 *prison,
                     struct dm_bio_prison_cell_v2 *cell,
                     struct bio_list *bios)
{
        BUG_ON(!cell->exclusive_lock);

        bio_list_merge(bios, &cell->bios);
        bio_list_init(&cell->bios);

        if (cell->shared_count) {
                cell->exclusive_lock = false;
                return false;
        }

        rb_erase(&cell->node, &prison->cells);
        return true;
}

bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
                       struct dm_bio_prison_cell_v2 *cell,
                       struct bio_list *bios)
{
        bool r;

        spin_lock_irq(&prison->lock);
        r = __unlock(prison, cell, bios);
        spin_unlock_irq(&prison->lock);

        return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);

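/*
 * Illustrative sketch only (not part of the upstream file): the exclusive
 * lock lifecycle.  The function names, lock level 0 and the use of
 * system_wq are placeholders for this example; a real client supplies its
 * own continuation and workqueue.
 */
#if 0
static void exclusive_lock_example(struct dm_bio_prison_v2 *prison,
                                   struct dm_cell_key_v2 *key,
                                   struct work_struct *continuation)
{
        struct dm_bio_prison_cell_v2 *prealloc, *cell;
        int r;

        prealloc = dm_bio_prison_alloc_cell_v2(prison, GFP_KERNEL);

        r = dm_cell_lock_v2(prison, key, 0, prealloc, &cell);
        if (r < 0) {
                /* Someone else already holds the exclusive lock. */
                dm_bio_prison_free_cell_v2(prison, prealloc);
                return;
        }

        if (cell != prealloc)
                dm_bio_prison_free_cell_v2(prison, prealloc);

        if (r)
                /*
                 * Shared holders remain; the prison queues @continuation
                 * once the last one calls dm_cell_put_v2().
                 */
                dm_cell_quiesce_v2(prison, cell, continuation);
        else
                /* No shared holders; carry on immediately. */
                queue_work(system_wq, continuation);
}

/* Later, typically from the continuation, once the work is done: */
static void exclusive_unlock_example(struct dm_bio_prison_v2 *prison,
                                     struct dm_bio_prison_cell_v2 *cell)
{
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);
        if (dm_cell_unlock_v2(prison, cell, &bios))
                dm_bio_prison_free_cell_v2(prison, cell);

        /* Reissue the bios that were parked while the lock was held. */
        while ((bio = bio_list_pop(&bios)))
                submit_bio_noacct(bio);
}
#endif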

/*----------------------------------------------------------------*/

int __init dm_bio_prison_init_v2(void)
{
        _cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
        if (!_cell_cache)
                return -ENOMEM;

        return 0;
}

void dm_bio_prison_exit_v2(void)
{
        kmem_cache_destroy(_cell_cache);
        _cell_cache = NULL;
}