linux/drivers/md/dm-bufio.c
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *      Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *      or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *      Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *      Start background writeback when the number of dirty buffers exceeds
 *      DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
#define DM_BUFIO_MIN_BUFFERS            8

#define DM_BUFIO_MEMORY_PERCENT         2
#define DM_BUFIO_VMALLOC_PERCENT        25
#define DM_BUFIO_WRITEBACK_RATIO        3
#define DM_BUFIO_LOW_WATERMARK_RATIO    16

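/*
 * Example (illustrative arithmetic, not taken from this file): on a machine
 * with 8 GiB of main memory, DM_BUFIO_MEMORY_PERCENT = 2 caps the default
 * cache at about 164 MiB.  With 128 MiB of vmalloc space,
 * DM_BUFIO_VMALLOC_PERCENT = 25 would cap it at 32 MiB instead, because the
 * lower of the two limits wins.
 */
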
/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS        30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS       300

/*
 * The number of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN            4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN      0
#define LIST_DIRTY      1
#define LIST_SIZE       2

/*
 * Linking of buffers:
 *      All buffers are linked to buffer_tree with their node field.
 *
 *      Clean buffers that are not being written (B_WRITING not set)
 *      are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *      Dirty and clean buffers that are being written are linked to
 *      lru[LIST_DIRTY] with their lru_list field. When the write
 *      finishes, the buffer cannot be relinked immediately (because we
 *      are in an interrupt context and relinking requires process
 *      context), so some clean-not-writing buffers can be held on
 *      dirty_lru too.  They are later added to lru in the process
 *      context.
 */
struct dm_bufio_client {
        struct mutex lock;

        struct list_head lru[LIST_SIZE];
        unsigned long n_buffers[LIST_SIZE];

        struct block_device *bdev;
        unsigned block_size;
        s8 sectors_per_block_bits;
        void (*alloc_callback)(struct dm_buffer *);
        void (*write_callback)(struct dm_buffer *);

        struct kmem_cache *slab_buffer;
        struct kmem_cache *slab_cache;
        struct dm_io_client *dm_io;

        struct list_head reserved_buffers;
        unsigned need_reserved_buffers;

        unsigned minimum_buffers;

        struct rb_root buffer_tree;
        wait_queue_head_t free_buffer_wait;

        sector_t start;

        int async_write_error;

        struct list_head client_list;

        struct shrinker shrinker;
        struct work_struct shrink_work;
        atomic_long_t need_shrink;
};

/*
 * Buffer state bits.
 */
#define B_READING       0
#define B_WRITING       1
#define B_DIRTY         2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
        DATA_MODE_SLAB = 0,
        DATA_MODE_GET_FREE_PAGES = 1,
        DATA_MODE_VMALLOC = 2,
        DATA_MODE_LIMIT = 3
};

struct dm_buffer {
        struct rb_node node;
        struct list_head lru_list;
        struct list_head global_list;
        sector_t block;
        void *data;
        unsigned char data_mode;                /* DATA_MODE_* */
        unsigned char list_mode;                /* LIST_* */
        blk_status_t read_error;
        blk_status_t write_error;
        unsigned accessed;
        unsigned hold_count;
        unsigned long state;
        unsigned long last_accessed;
        unsigned dirty_start;
        unsigned dirty_end;
        unsigned write_start;
        unsigned write_end;
        struct dm_bufio_client *c;
        struct list_head write_list;
        void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
        unsigned int stack_len;
        unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()   (!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
        mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
        return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
        mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static LIST_HEAD(global_queue);

static unsigned long global_num = 0;

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;


#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
        b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
        struct rb_node *n = c->buffer_tree.rb_node;
        struct dm_buffer *b;

        while (n) {
                b = container_of(n, struct dm_buffer, node);

                if (b->block == block)
                        return b;

                n = block < b->block ? n->rb_left : n->rb_right;
        }

        return NULL;
}

static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
{
        struct rb_node *n = c->buffer_tree.rb_node;
        struct dm_buffer *b;
        struct dm_buffer *best = NULL;

        while (n) {
                b = container_of(n, struct dm_buffer, node);

                if (b->block == block)
                        return b;

                if (block <= b->block) {
                        n = n->rb_left;
                        best = b;
                } else {
                        n = n->rb_right;
                }
        }

        return best;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
        struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
        struct dm_buffer *found;

        while (*new) {
                found = container_of(*new, struct dm_buffer, node);

                if (found->block == b->block) {
                        BUG_ON(found != b);
                        return;
                }

                parent = *new;
                new = b->block < found->block ?
                        &found->node.rb_left : &found->node.rb_right;
        }

        rb_link_node(&b->node, parent, new);
        rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
        rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
        unsigned char data_mode;
        long diff;

        static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
                &dm_bufio_allocated_kmem_cache,
                &dm_bufio_allocated_get_free_pages,
                &dm_bufio_allocated_vmalloc,
        };

        data_mode = b->data_mode;
        diff = (long)b->c->block_size;
        if (unlink)
                diff = -diff;

        spin_lock(&global_spinlock);

        *class_ptr[data_mode] += diff;

        dm_bufio_current_allocated += diff;

        if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
                dm_bufio_peak_allocated = dm_bufio_current_allocated;

        b->accessed = 1;

        if (!unlink) {
                list_add(&b->global_list, &global_queue);
                global_num++;
                if (dm_bufio_current_allocated > dm_bufio_cache_size)
                        queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
        } else {
                list_del(&b->global_list);
                global_num--;
        }

        spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
        BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
        BUG_ON(dm_bufio_client_count < 0);

        dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

        /*
         * Use default if set to 0 and report the actual cache size used.
         */
        if (!dm_bufio_cache_size_latch) {
                (void)cmpxchg(&dm_bufio_cache_size, 0,
                              dm_bufio_default_cache_size);
                dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
        }
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                               unsigned char *data_mode)
{
        if (unlikely(c->slab_cache != NULL)) {
                *data_mode = DATA_MODE_SLAB;
                return kmem_cache_alloc(c->slab_cache, gfp_mask);
        }

        if (c->block_size <= KMALLOC_MAX_SIZE &&
            gfp_mask & __GFP_NORETRY) {
                *data_mode = DATA_MODE_GET_FREE_PAGES;
                return (void *)__get_free_pages(gfp_mask,
                                                c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
        }

        *data_mode = DATA_MODE_VMALLOC;

        /*
         * __vmalloc allocates the data pages and auxiliary structures with
         * gfp_flags that were specified, but pagetables are always allocated
         * with GFP_KERNEL, no matter what was specified as gfp_mask.
         *
         * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
         * all allocations done by this process (including pagetables) are done
         * as if GFP_NOIO was specified.
         */
        if (gfp_mask & __GFP_NORETRY) {
                unsigned noio_flag = memalloc_noio_save();
                void *ptr = __vmalloc(c->block_size, gfp_mask);

                memalloc_noio_restore(noio_flag);
                return ptr;
        }

        return __vmalloc(c->block_size, gfp_mask);
}
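
/*
 * Example (illustrative arithmetic): the page order passed to
 * __get_free_pages() above is
 * sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT).  With 4 KiB pages
 * (PAGE_SHIFT = 12, SECTOR_SHIFT = 9) and a 64 KiB block size,
 * sectors_per_block_bits = 7, so the order is 7 - 3 = 4, i.e. sixteen
 * physically contiguous pages.
 */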

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
                             void *data, unsigned char data_mode)
{
        switch (data_mode) {
        case DATA_MODE_SLAB:
                kmem_cache_free(c->slab_cache, data);
                break;

        case DATA_MODE_GET_FREE_PAGES:
                free_pages((unsigned long)data,
                           c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
                break;

        case DATA_MODE_VMALLOC:
                vfree(data);
                break;

        default:
                DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
                       data_mode);
                BUG();
        }
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
        struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

        if (!b)
                return NULL;

        b->c = c;

        b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
        if (!b->data) {
                kmem_cache_free(c->slab_buffer, b);
                return NULL;
        }

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
        b->stack_len = 0;
#endif
        return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        free_buffer_data(c, b->data, b->data_mode);
        kmem_cache_free(c->slab_buffer, b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
        struct dm_bufio_client *c = b->c;

        c->n_buffers[dirty]++;
        b->block = block;
        b->list_mode = dirty;
        list_add(&b->lru_list, &c->lru[dirty]);
        __insert(b->c, b);
        b->last_accessed = jiffies;

        adjust_total_allocated(b, false);
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        __remove(b->c, b);
        list_del(&b->lru_list);

        adjust_total_allocated(b, true);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
        struct dm_bufio_client *c = b->c;

        b->accessed = 1;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        c->n_buffers[dirty]++;
        b->list_mode = dirty;
        list_move(&b->lru_list, &c->lru[dirty]);
        b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * The bio interface is faster, but it has some problems:
 *      the vector list is limited (increasing this limit increases
 *      memory consumption per buffer, so it is not viable);
 *
 *      the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use the dm-io layer to do the
 * I/O.  The dm-io layer splits the I/O into multiple requests, avoiding
 * the above shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->end_io, pretending
 * that the request was handled directly with the bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
        struct dm_buffer *b = context;

        b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
                     unsigned n_sectors, unsigned offset)
{
        int r;
        struct dm_io_request io_req = {
                .bi_op = rw,
                .bi_op_flags = 0,
                .notify.fn = dmio_complete,
                .notify.context = b,
                .client = b->c->dm_io,
        };
        struct dm_io_region region = {
                .bdev = b->c->bdev,
                .sector = sector,
                .count = n_sectors,
        };

        if (b->data_mode != DATA_MODE_VMALLOC) {
                io_req.mem.type = DM_IO_KMEM;
                io_req.mem.ptr.addr = (char *)b->data + offset;
        } else {
                io_req.mem.type = DM_IO_VMA;
                io_req.mem.ptr.vma = (char *)b->data + offset;
        }

        r = dm_io(&io_req, 1, &region, NULL);
        if (unlikely(r))
                b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
        struct dm_buffer *b = bio->bi_private;
        blk_status_t status = bio->bi_status;
        bio_put(bio);
        b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
                    unsigned n_sectors, unsigned offset)
{
        struct bio *bio;
        char *ptr;
        unsigned vec_size, len;

        vec_size = b->c->block_size >> PAGE_SHIFT;
        if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
                vec_size += 2;

        bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
        if (!bio) {
dmio:
                use_dmio(b, rw, sector, n_sectors, offset);
                return;
        }

        bio->bi_iter.bi_sector = sector;
        bio_set_dev(bio, b->c->bdev);
        bio_set_op_attrs(bio, rw, 0);
        bio->bi_end_io = bio_complete;
        bio->bi_private = b;

        ptr = (char *)b->data + offset;
        len = n_sectors << SECTOR_SHIFT;

        do {
                unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
                if (!bio_add_page(bio, virt_to_page(ptr), this_step,
                                  offset_in_page(ptr))) {
                        bio_put(bio);
                        goto dmio;
                }

                len -= this_step;
                ptr += this_step;
        } while (len > 0);

        submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
        sector_t sector;

        if (likely(c->sectors_per_block_bits >= 0))
                sector = block << c->sectors_per_block_bits;
        else
                sector = block * (c->block_size >> SECTOR_SHIFT);
        sector += c->start;

        return sector;
}
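
/*
 * Example (illustrative arithmetic): with a 4 KiB block size on 512-byte
 * sectors, sectors_per_block_bits = 3, so block 10 maps to sector
 * (10 << 3) + c->start = 80 + c->start.  A non-power-of-2 block size such
 * as 3 KiB takes the multiplication path: block 10 maps to sector
 * 10 * (3072 >> 9) + c->start = 60 + c->start.
 */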

static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
{
        unsigned n_sectors;
        sector_t sector;
        unsigned offset, end;

        b->end_io = end_io;

        sector = block_to_sector(b->c, b->block);

        if (rw != REQ_OP_WRITE) {
                n_sectors = b->c->block_size >> SECTOR_SHIFT;
                offset = 0;
        } else {
                if (b->c->write_callback)
                        b->c->write_callback(b);
                offset = b->write_start;
                end = b->write_end;
                offset &= -DM_BUFIO_WRITE_ALIGN;
                end += DM_BUFIO_WRITE_ALIGN - 1;
                end &= -DM_BUFIO_WRITE_ALIGN;
                if (unlikely(end > b->c->block_size))
                        end = b->c->block_size;

                sector += offset >> SECTOR_SHIFT;
                n_sectors = (end - offset) >> SECTOR_SHIFT;
        }

        if (b->data_mode != DATA_MODE_VMALLOC)
                use_bio(b, rw, sector, n_sectors, offset);
        else
                use_dmio(b, rw, sector, n_sectors, offset);
}
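
/*
 * Example (illustrative arithmetic): for a write with dirty range
 * [100, 5000) in an 8 KiB block, the rounding above yields
 * offset = 100 & ~4095 = 0 and end = (5000 + 4095) & ~4095 = 8192, so the
 * buffer is written as sectors [0, 16) - the dirty bytes padded out to
 * DM_BUFIO_WRITE_ALIGN boundaries.
 */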

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
        b->write_error = status;
        if (unlikely(status)) {
                struct dm_bufio_client *c = b->c;

                (void)cmpxchg(&c->async_write_error, 0,
                                blk_status_to_errno(status));
        }

        BUG_ON(!test_bit(B_WRITING, &b->state));

        smp_mb__before_atomic();
        clear_bit(B_WRITING, &b->state);
        smp_mb__after_atomic();

        wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is a previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
                                 struct list_head *write_list)
{
        if (!test_bit(B_DIRTY, &b->state))
                return;

        clear_bit(B_DIRTY, &b->state);
        wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

        b->write_start = b->dirty_start;
        b->write_end = b->dirty_end;

        if (!write_list)
                submit_io(b, REQ_OP_WRITE, write_endio);
        else
                list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
        struct blk_plug plug;
        blk_start_plug(&plug);
        while (!list_empty(write_list)) {
                struct dm_buffer *b =
                        list_entry(write_list->next, struct dm_buffer, write_list);
                list_del(&b->write_list);
                submit_io(b, REQ_OP_WRITE, write_endio);
                cond_resched();
        }
        blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
        BUG_ON(b->hold_count);

        if (!b->state)  /* fast case */
                return;

        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
        __write_dirty_buffer(b, NULL);
        wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
        struct dm_buffer *b;

        list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
                BUG_ON(test_bit(B_WRITING, &b->state));
                BUG_ON(test_bit(B_DIRTY, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                cond_resched();
        }

        list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                cond_resched();
        }

        return NULL;
}

/*
 * Wait until some other thread frees a buffer or releases its hold count
 * on one.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&c->free_buffer_wait, &wait);
        set_current_state(TASK_UNINTERRUPTIBLE);
        dm_bufio_unlock(c);

        io_schedule();

        remove_wait_queue(&c->free_buffer_wait, &wait);

        dm_bufio_lock(c);
}

enum new_flag {
        NF_FRESH = 0,
        NF_READ = 1,
        NF_GET = 2,
        NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
        struct dm_buffer *b;
        bool tried_noio_alloc = false;

        /*
         * dm-bufio is resistant to allocation failures (it just keeps
         * one buffer reserved in case all the allocations fail).
         * So set flags to not try too hard:
         *      GFP_NOWAIT: don't wait; if we need to sleep we'll release our
         *                  mutex and wait ourselves.
         *      __GFP_NORETRY: don't retry and rather return failure
         *      __GFP_NOMEMALLOC: don't use emergency reserves
         *      __GFP_NOWARN: don't print a warning in case of failure
         *
         * For debugging, if we set the cache size to 1, no new buffers will
         * be allocated.
         */
        while (1) {
                if (dm_bufio_cache_size_latch != 1) {
                        b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
                        if (b)
                                return b;
                }

                if (nf == NF_PREFETCH)
                        return NULL;

                if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
                        dm_bufio_unlock(c);
                        b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
                        dm_bufio_lock(c);
                        if (b)
                                return b;
                        tried_noio_alloc = true;
                }

                if (!list_empty(&c->reserved_buffers)) {
                        b = list_entry(c->reserved_buffers.next,
                                       struct dm_buffer, lru_list);
                        list_del(&b->lru_list);
                        c->need_reserved_buffers++;

                        return b;
                }

                b = __get_unclaimed_buffer(c);
                if (b)
                        return b;

                __wait_for_free_buffer(c);
        }
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
        struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

        if (!b)
                return NULL;

        if (c->alloc_callback)
                c->alloc_callback(b);

        return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        if (!c->need_reserved_buffers)
                free_buffer(b);
        else {
                list_add(&b->lru_list, &c->reserved_buffers);
                c->need_reserved_buffers--;
        }

        wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
                                        struct list_head *write_list)
{
        struct dm_buffer *b, *tmp;

        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));

                if (!test_bit(B_DIRTY, &b->state) &&
                    !test_bit(B_WRITING, &b->state)) {
                        __relink_lru(b, LIST_CLEAN);
                        continue;
                }

                if (no_wait && test_bit(B_WRITING, &b->state))
                        return;

                __write_dirty_buffer(b, write_list);
                cond_resched();
        }
}

/*
 * Check if we're over the dirty-buffer watermark.
 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times
 * the number of clean buffers, start writing them back asynchronously.
 */
static void __check_watermark(struct dm_bufio_client *c,
                              struct list_head *write_list)
{
        if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
                __write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
                                     enum new_flag nf, int *need_submit,
                                     struct list_head *write_list)
{
        struct dm_buffer *b, *new_b = NULL;

        *need_submit = 0;

        b = __find(c, block);
        if (b)
                goto found_buffer;

        if (nf == NF_GET)
                return NULL;

        new_b = __alloc_buffer_wait(c, nf);
        if (!new_b)
                return NULL;

        /*
         * We've had a period where the mutex was unlocked, so we need to
         * recheck the buffer tree.
         */
        b = __find(c, block);
        if (b) {
                __free_buffer_wake(new_b);
                goto found_buffer;
        }

        __check_watermark(c, write_list);

        b = new_b;
        b->hold_count = 1;
        b->read_error = 0;
        b->write_error = 0;
        __link_buffer(b, block, LIST_CLEAN);

        if (nf == NF_FRESH) {
                b->state = 0;
                return b;
        }

        b->state = 1 << B_READING;
        *need_submit = 1;

        return b;

found_buffer:
        if (nf == NF_PREFETCH)
                return NULL;
        /*
         * Note: it is essential that we don't wait for the buffer to be
         * read if the dm_bufio_get function is used. Both dm_bufio_get and
         * dm_bufio_prefetch can be used in the driver request routine.
         * If the user called both dm_bufio_prefetch and dm_bufio_get on
         * the same buffer, it would deadlock if we waited.
         */
        if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
                return NULL;

        b->hold_count++;
        __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
                     test_bit(B_WRITING, &b->state));
        return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{
        b->read_error = status;

        BUG_ON(!test_bit(B_READING, &b->state));

        smp_mb__before_atomic();
        clear_bit(B_READING, &b->state);
        smp_mb__after_atomic();

        wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
                      enum new_flag nf, struct dm_buffer **bp)
{
        int need_submit;
        struct dm_buffer *b;

        LIST_HEAD(write_list);

        dm_bufio_lock(c);
        b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
        if (b && b->hold_count == 1)
                buffer_record_stack(b);
#endif
        dm_bufio_unlock(c);

        __flush_write_list(&write_list);

        if (!b)
                return NULL;

        if (need_submit)
                submit_io(b, REQ_OP_READ, read_endio);

        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

        if (b->read_error) {
                int error = blk_status_to_errno(b->read_error);

                dm_bufio_release(b);

                return ERR_PTR(error);
        }

        *bp = b;

        return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
                    struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
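
/*
 * Example usage (illustrative sketch, not part of this file): a typical
 * bufio client reads a block, inspects it, and drops its reference.  Read
 * failures come back as ERR_PTR() values:
 *
 *      struct dm_buffer *buf;
 *      void *data = dm_bufio_read(client, block_nr, &buf);
 *
 *      if (IS_ERR(data))
 *              return PTR_ERR(data);
 *      examine_block(data);            // hypothetical helper
 *      dm_bufio_release(buf);
 */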

void dm_bufio_prefetch(struct dm_bufio_client *c,
                       sector_t block, unsigned n_blocks)
{
        struct blk_plug plug;

        LIST_HEAD(write_list);

        BUG_ON(dm_bufio_in_request());

        blk_start_plug(&plug);
        dm_bufio_lock(c);

        for (; n_blocks--; block++) {
                int need_submit;
                struct dm_buffer *b;
                b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
                                &write_list);
                if (unlikely(!list_empty(&write_list))) {
                        dm_bufio_unlock(c);
                        blk_finish_plug(&plug);
                        __flush_write_list(&write_list);
                        blk_start_plug(&plug);
                        dm_bufio_lock(c);
                }
                if (unlikely(b != NULL)) {
                        dm_bufio_unlock(c);

                        if (need_submit)
                                submit_io(b, REQ_OP_READ, read_endio);
                        dm_bufio_release(b);

                        cond_resched();

                        if (!n_blocks)
                                goto flush_plug;
                        dm_bufio_lock(c);
                }
        }

        dm_bufio_unlock(c);

flush_plug:
        blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        dm_bufio_lock(c);

        BUG_ON(!b->hold_count);

        b->hold_count--;
        if (!b->hold_count) {
                wake_up(&c->free_buffer_wait);

                /*
                 * If there were errors on the buffer, and the buffer is not
                 * to be written, free the buffer. There is no point in
                 * caching an invalid buffer.
                 */
                if ((b->read_error || b->write_error) &&
                    !test_bit(B_READING, &b->state) &&
                    !test_bit(B_WRITING, &b->state) &&
                    !test_bit(B_DIRTY, &b->state)) {
                        __unlink_buffer(b);
                        __free_buffer_wake(b);
                }
        }

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
                                        unsigned start, unsigned end)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(start >= end);
        BUG_ON(end > b->c->block_size);

        dm_bufio_lock(c);

        BUG_ON(test_bit(B_READING, &b->state));

        if (!test_and_set_bit(B_DIRTY, &b->state)) {
                b->dirty_start = start;
                b->dirty_end = end;
                __relink_lru(b, LIST_DIRTY);
        } else {
                if (start < b->dirty_start)
                        b->dirty_start = start;
                if (end > b->dirty_end)
                        b->dirty_end = end;
        }

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
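
/*
 * Example (illustrative): marking bytes [0, 512) dirty and later bytes
 * [4096, 4608) on the same buffer widens the single tracked range to
 * [0, 4608).  dm-bufio keeps one dirty interval per buffer, not a list of
 * fragments, so the eventual write covers everything in between.
 */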

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
        dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
        LIST_HEAD(write_list);

        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0, &write_list);
        dm_bufio_unlock(c);
        __flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
        int a, f;
        unsigned long buffers_processed = 0;
        struct dm_buffer *b, *tmp;

        LIST_HEAD(write_list);

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0, &write_list);
        dm_bufio_unlock(c);
        __flush_write_list(&write_list);
        dm_bufio_lock(c);

again:
        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
                int dropped_lock = 0;

                if (buffers_processed < c->n_buffers[LIST_DIRTY])
                        buffers_processed++;

                BUG_ON(test_bit(B_READING, &b->state));

                if (test_bit(B_WRITING, &b->state)) {
                        if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
                                dropped_lock = 1;
                                b->hold_count++;
                                dm_bufio_unlock(c);
                                wait_on_bit_io(&b->state, B_WRITING,
                                               TASK_UNINTERRUPTIBLE);
                                dm_bufio_lock(c);
                                b->hold_count--;
                        } else
                                wait_on_bit_io(&b->state, B_WRITING,
                                               TASK_UNINTERRUPTIBLE);
                }

                if (!test_bit(B_DIRTY, &b->state) &&
                    !test_bit(B_WRITING, &b->state))
                        __relink_lru(b, LIST_CLEAN);

                cond_resched();

                /*
                 * If we dropped the lock, the list is no longer consistent,
                 * so we must restart the search.
                 *
                 * In the most common case, the buffer just processed is
                 * relinked to the clean list, so we won't loop scanning the
                 * same buffer again and again.
                 *
                 * This may livelock if there is another thread simultaneously
                 * dirtying buffers, so we count the number of buffers walked
                 * and if it exceeds the total number of buffers, it means that
                 * someone is doing some writes simultaneously with us.  In
                 * this case, stop, dropping the lock.
                 */
                if (dropped_lock)
                        goto again;
        }
        wake_up(&c->free_buffer_wait);
        dm_bufio_unlock(c);

        a = xchg(&c->async_write_error, 0);
        f = dm_bufio_issue_flush(c);
        if (a)
                return a;

        return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
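
/*
 * Example usage (illustrative sketch, not part of this file): a client
 * commits changes by marking buffers dirty, releasing them, and then
 * flushing everything at once so the block layer can merge the writes:
 *
 *      dm_bufio_mark_buffer_dirty(buf);
 *      dm_bufio_release(buf);
 *      r = dm_bufio_write_dirty_buffers(client);  // also flushes disk cache
 *      if (r)
 *              return r;
 */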

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
                .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
        };
        struct dm_io_region io_reg = {
                .bdev = c->bdev,
                .sector = 0,
                .count = 0,
        };

        BUG_ON(dm_bufio_in_request());

        return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * Use dm-io to send a discard request to the device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_DISCARD,
                .bi_op_flags = REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
        };
        struct dm_io_region io_reg = {
                .bdev = c->bdev,
                .sector = block_to_sector(c, block),
                .count = block_to_sector(c, count),
        };

        BUG_ON(dm_bufio_in_request());

        return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
        struct dm_bufio_client *c = b->c;
        struct dm_buffer *new;

        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);

retry:
        new = __find(c, new_block);
        if (new) {
                if (new->hold_count) {
                        __wait_for_free_buffer(c);
                        goto retry;
                }

                /*
                 * FIXME: Is there any point waiting for a write that's going
                 * to be overwritten in a bit?
                 */
                __make_buffer_clean(new);
                __unlink_buffer(new);
                __free_buffer_wake(new);
        }

        BUG_ON(!b->hold_count);
        BUG_ON(test_bit(B_READING, &b->state));

        __write_dirty_buffer(b, NULL);
        if (b->hold_count == 1) {
                wait_on_bit_io(&b->state, B_WRITING,
                               TASK_UNINTERRUPTIBLE);
                set_bit(B_DIRTY, &b->state);
                b->dirty_start = 0;
                b->dirty_end = c->block_size;
                __unlink_buffer(b);
                __link_buffer(b, new_block, LIST_DIRTY);
        } else {
                sector_t old_block;
                wait_on_bit_lock_io(&b->state, B_WRITING,
                                    TASK_UNINTERRUPTIBLE);
                /*
                 * Relink buffer to "new_block" so that write_callback
                 * sees "new_block" as the block number.
                 * After the write, link the buffer back to old_block.
                 * All this must be done under the bufio lock, so that the
                 * block number change isn't visible to other threads.
                 */
                old_block = b->block;
                __unlink_buffer(b);
                __link_buffer(b, new_block, b->list_mode);
                submit_io(b, REQ_OP_WRITE, write_endio);
                wait_on_bit_io(&b->state, B_WRITING,
                               TASK_UNINTERRUPTIBLE);
                __unlink_buffer(b);
                __link_buffer(b, old_block, b->list_mode);
        }

        dm_bufio_unlock(c);
        dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

static void forget_buffer_locked(struct dm_buffer *b)
{
        if (likely(!b->hold_count) && likely(!b->state)) {
                __unlink_buffer(b);
                __free_buffer_wake(b);
        }
}

/*
 * Free the given buffer.
 *
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
        struct dm_buffer *b;

        dm_bufio_lock(c);

        b = __find(c, block);
        if (b)
                forget_buffer_locked(b);

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);

void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
{
        struct dm_buffer *b;
        sector_t end_block = block + n_blocks;

        while (block < end_block) {
                dm_bufio_lock(c);

                b = __find_next(c, block);
                if (b) {
                        block = b->block + 1;
                        forget_buffer_locked(b);
                }

                dm_bufio_unlock(c);

                if (!b)
                        break;
        }
}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
        c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
        return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
        sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
        if (s >= c->start)
                s -= c->start;
        else
                s = 0;
        if (likely(c->sectors_per_block_bits >= 0))
                s >>= c->sectors_per_block_bits;
        else
                sector_div(s, c->block_size >> SECTOR_SHIFT);
        return s;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
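
/*
 * Example (illustrative arithmetic): a 1 GiB device holds 2097152 512-byte
 * sectors; with c->start = 2048 and a 4 KiB block size
 * (sectors_per_block_bits = 3), the usable size is
 * (2097152 - 2048) >> 3 = 261888 blocks.
 */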

struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{
        return c->dm_io;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
        return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
        return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
        return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
        return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
        struct dm_buffer *b;
        int i;
        bool warned = false;

        BUG_ON(dm_bufio_in_request());

        /*
         * An optimization so that the buffers are not written one-by-one.
         */
        dm_bufio_write_dirty_buffers_async(c);

        dm_bufio_lock(c);

        while ((b = __get_unclaimed_buffer(c)))
                __free_buffer_wake(b);

        for (i = 0; i < LIST_SIZE; i++)
                list_for_each_entry(b, &c->lru[i], lru_list) {
                        WARN_ON(!warned);
                        warned = true;
                        DMERR("leaked buffer %llx, hold count %u, list %d",
                              (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
                        stack_trace_print(b->stack_entries, b->stack_len, 1);
                        /* mark unclaimed to avoid BUG_ON below */
                        b->hold_count = 0;
#endif
                }

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
        while ((b = __get_unclaimed_buffer(c)))
                __free_buffer_wake(b);
#endif

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(!list_empty(&c->lru[i]));

        dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if I/O is pending or the client
 * is still using it.  Caller is expected to know buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
        if (!(gfp & __GFP_FS)) {
                if (test_bit(B_READING, &b->state) ||
                    test_bit(B_WRITING, &b->state) ||
                    test_bit(B_DIRTY, &b->state))
                        return false;
        }

        if (b->hold_count)
                return false;

        __make_buffer_clean(b);
        __unlink_buffer(b);
        __free_buffer_wake(b);

        return true;
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
        unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
        if (likely(c->sectors_per_block_bits >= 0))
                retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
        else
                retain_bytes /= c->block_size;
        return retain_bytes;
}
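
/*
 * Example (illustrative arithmetic): with the default dm_bufio_retain_bytes
 * of 256 KiB and a 4 KiB block size, retain_bytes >> (3 + 9) = 64, so the
 * shrinker always leaves at least 64 buffers per client untouched.
 */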
1649
1650static void __scan(struct dm_bufio_client *c)
1651{
1652        int l;
1653        struct dm_buffer *b, *tmp;
1654        unsigned long freed = 0;
1655        unsigned long count = c->n_buffers[LIST_CLEAN] +
1656                              c->n_buffers[LIST_DIRTY];
1657        unsigned long retain_target = get_retain_buffers(c);
1658
1659        for (l = 0; l < LIST_SIZE; l++) {
1660                list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1661                        if (count - freed <= retain_target)
1662                                atomic_long_set(&c->need_shrink, 0);
1663                        if (!atomic_long_read(&c->need_shrink))
1664                                return;
1665                        if (__try_evict_buffer(b, GFP_KERNEL)) {
1666                                atomic_long_dec(&c->need_shrink);
1667                                freed++;
1668                        }
1669                        cond_resched();
1670                }
1671        }
1672}
1673
1674static void shrink_work(struct work_struct *w)
1675{
1676        struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
1677
1678        dm_bufio_lock(c);
1679        __scan(c);
1680        dm_bufio_unlock(c);
1681}
1682
static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct dm_bufio_client *c;

        c = container_of(shrink, struct dm_bufio_client, shrinker);
        atomic_long_add(sc->nr_to_scan, &c->need_shrink);
        queue_work(dm_bufio_wq, &c->shrink_work);

        return sc->nr_to_scan;
}

static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
        unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
                              READ_ONCE(c->n_buffers[LIST_DIRTY]);
        unsigned long retain_target = get_retain_buffers(c);
        unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);

        if (unlikely(count < retain_target))
                count = 0;
        else
                count -= retain_target;

        if (unlikely(count < queued_for_cleanup))
                count = 0;
        else
                count -= queued_for_cleanup;

        return count;
}
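
/*
 * For illustration: with 1000 cached buffers, a retain target of 64 and
 * 100 evictions already queued in need_shrink, the count callback
 * reports 1000 - 64 - 100 = 836 evictable objects; the two clamps above
 * keep the unsigned arithmetic from wrapping below zero.
 */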

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
                                               unsigned reserved_buffers, unsigned aux_size,
                                               void (*alloc_callback)(struct dm_buffer *),
                                               void (*write_callback)(struct dm_buffer *))
{
        int r;
        struct dm_bufio_client *c;
        unsigned i;
        char slab_name[27];

        if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
                DMERR("%s: block size not specified or is not multiple of 512b", __func__);
                r = -EINVAL;
                goto bad_client;
        }

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c) {
                r = -ENOMEM;
                goto bad_client;
        }
        c->buffer_tree = RB_ROOT;

        c->bdev = bdev;
        c->block_size = block_size;
        if (is_power_of_2(block_size))
                c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
        else
                c->sectors_per_block_bits = -1;

        c->alloc_callback = alloc_callback;
        c->write_callback = write_callback;

        for (i = 0; i < LIST_SIZE; i++) {
                INIT_LIST_HEAD(&c->lru[i]);
                c->n_buffers[i] = 0;
        }

        mutex_init(&c->lock);
        INIT_LIST_HEAD(&c->reserved_buffers);
        c->need_reserved_buffers = reserved_buffers;

        dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);

        init_waitqueue_head(&c->free_buffer_wait);
        c->async_write_error = 0;

        c->dm_io = dm_io_client_create();
        if (IS_ERR(c->dm_io)) {
                r = PTR_ERR(c->dm_io);
                goto bad_dm_io;
        }

        if (block_size <= KMALLOC_MAX_SIZE &&
            (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
                unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
                snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
                c->slab_cache = kmem_cache_create(slab_name, block_size, align,
                                                  SLAB_RECLAIM_ACCOUNT, NULL);
                if (!c->slab_cache) {
                        r = -ENOMEM;
                        goto bad;
                }
        }
        if (aux_size)
                snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
        else
                snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
        c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
                                           0, SLAB_RECLAIM_ACCOUNT, NULL);
        if (!c->slab_buffer) {
                r = -ENOMEM;
                goto bad;
        }

        while (c->need_reserved_buffers) {
                struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

                if (!b) {
                        r = -ENOMEM;
                        goto bad;
                }
                __free_buffer_wake(b);
        }

        INIT_WORK(&c->shrink_work, shrink_work);
        atomic_long_set(&c->need_shrink, 0);

        c->shrinker.count_objects = dm_bufio_shrink_count;
        c->shrinker.scan_objects = dm_bufio_shrink_scan;
        c->shrinker.seeks = 1;
        c->shrinker.batch = 0;
        r = register_shrinker(&c->shrinker);
        if (r)
                goto bad;

        mutex_lock(&dm_bufio_clients_lock);
        dm_bufio_client_count++;
        list_add(&c->client_list, &dm_bufio_all_clients);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

        return c;

bad:
        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }
        kmem_cache_destroy(c->slab_cache);
        kmem_cache_destroy(c->slab_buffer);
        dm_io_client_destroy(c->dm_io);
bad_dm_io:
        mutex_destroy(&c->lock);
        kfree(c);
bad_client:
        return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
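
/*
 * A minimal usage sketch, not part of this file: a hypothetical
 * device-mapper target keeps one bufio client per underlying device and
 * tears it down in its destructor.  "struct my_tgt" and its fields are
 * invented for illustration; error handling is abbreviated.
 *
 *	struct my_tgt {
 *		struct dm_dev *dev;
 *		struct dm_bufio_client *bufio;
 *	};
 *
 *	static int my_tgt_open(struct my_tgt *t)
 *	{
 *		// 4 KiB blocks, 1 reserved buffer, no aux data, no callbacks
 *		t->bufio = dm_bufio_client_create(t->dev->bdev, 4096, 1, 0,
 *						  NULL, NULL);
 *		if (IS_ERR(t->bufio))
 *			return PTR_ERR(t->bufio);
 *		return 0;
 *	}
 *
 *	static void my_tgt_close(struct my_tgt *t)
 *	{
 *		// every buffer must have been released before this point
 *		dm_bufio_client_destroy(t->bufio);
 *	}
 */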

/*
 * Free the buffering interface.
 * It is required that there are no references to any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
        unsigned i;

        drop_buffers(c);

        unregister_shrinker(&c->shrinker);
        flush_work(&c->shrink_work);

        mutex_lock(&dm_bufio_clients_lock);

        list_del(&c->client_list);
        dm_bufio_client_count--;
        __cache_size_refresh();

        mutex_unlock(&dm_bufio_clients_lock);

        BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
        BUG_ON(c->need_reserved_buffers);

        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }

        for (i = 0; i < LIST_SIZE; i++)
                if (c->n_buffers[i])
                        DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(c->n_buffers[i]);

        kmem_cache_destroy(c->slab_cache);
        kmem_cache_destroy(c->slab_buffer);
        dm_io_client_destroy(c->dm_io);
        mutex_destroy(&c->lock);
        kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
        c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
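
/*
 * For illustration: a client whose buffered area does not begin at
 * sector 0 of the device sets the offset once, right after creating the
 * client and before reading any buffer ("data_start" is a hypothetical
 * field):
 *
 *	dm_bufio_set_sector_offset(t->bufio, t->data_start);
 */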

static unsigned get_max_age_hz(void)
{
        unsigned max_age = READ_ONCE(dm_bufio_max_age);

        if (max_age > UINT_MAX / HZ)
                max_age = UINT_MAX / HZ;

        return max_age * HZ;
}
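
/*
 * For illustration: with HZ == 1000, the clamp above caps max_age at
 * UINT_MAX / 1000 = 4294967 seconds (about 49.7 days), so the final
 * multiplication by HZ cannot overflow the unsigned result.
 */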

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
        return time_after_eq(jiffies, b->last_accessed + age_hz);
}

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
        struct dm_buffer *b, *tmp;
        unsigned long retain_target = get_retain_buffers(c);
        unsigned long count;
        LIST_HEAD(write_list);

        dm_bufio_lock(c);

        __check_watermark(c, &write_list);
        if (unlikely(!list_empty(&write_list))) {
                dm_bufio_unlock(c);
                __flush_write_list(&write_list);
                dm_bufio_lock(c);
        }

        count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
                if (count <= retain_target)
                        break;

                if (!older_than(b, age_hz))
                        break;

                if (__try_evict_buffer(b, 0))
                        count--;

                cond_resched();
        }

        dm_bufio_unlock(c);
}

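/*
 * Push the global cache back under the watermark derived from
 * DM_BUFIO_LOW_WATERMARK_RATIO by evicting least-recently-used buffers
 * across all clients.  The lock order is dm_bufio_clients_lock, then a
 * per-client mutex, then global_spinlock.  Because the spinlock is
 * innermost, a client mutex may only be taken with trylock while the
 * spinlock is held; when that fails, the spinlock is dropped, the mutex
 * is acquired the slow way and the scan restarts from the tail of
 * global_queue.
 */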
static void do_global_cleanup(struct work_struct *w)
{
        struct dm_bufio_client *locked_client = NULL;
        struct dm_bufio_client *current_client;
        struct dm_buffer *b;
        unsigned spinlock_hold_count;
        unsigned long threshold = dm_bufio_cache_size -
                dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
        unsigned long loops = global_num * 2;

        mutex_lock(&dm_bufio_clients_lock);

        while (1) {
                cond_resched();

                spin_lock(&global_spinlock);
                if (unlikely(dm_bufio_current_allocated <= threshold))
                        break;

                spinlock_hold_count = 0;
get_next:
                if (!loops--)
                        break;
                if (unlikely(list_empty(&global_queue)))
                        break;
                b = list_entry(global_queue.prev, struct dm_buffer, global_list);

                if (b->accessed) {
                        b->accessed = 0;
                        list_move(&b->global_list, &global_queue);
                        if (likely(++spinlock_hold_count < 16))
                                goto get_next;
                        spin_unlock(&global_spinlock);
                        continue;
                }

                current_client = b->c;
                if (unlikely(current_client != locked_client)) {
                        if (locked_client)
                                dm_bufio_unlock(locked_client);

                        if (!dm_bufio_trylock(current_client)) {
                                spin_unlock(&global_spinlock);
                                dm_bufio_lock(current_client);
                                locked_client = current_client;
                                continue;
                        }

                        locked_client = current_client;
                }

                spin_unlock(&global_spinlock);

                if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
                        spin_lock(&global_spinlock);
                        list_move(&b->global_list, &global_queue);
                        spin_unlock(&global_spinlock);
                }
        }

        spin_unlock(&global_spinlock);

        if (locked_client)
                dm_bufio_unlock(locked_client);

        mutex_unlock(&dm_bufio_clients_lock);
}

static void cleanup_old_buffers(void)
{
        unsigned long max_age_hz = get_max_age_hz();
        struct dm_bufio_client *c;

        mutex_lock(&dm_bufio_clients_lock);

        __cache_size_refresh();

        list_for_each_entry(c, &dm_bufio_all_clients, client_list)
                __evict_old_buffers(c, max_age_hz);

        mutex_unlock(&dm_bufio_clients_lock);
}

static void work_fn(struct work_struct *w)
{
        cleanup_old_buffers();

        queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
        __u64 mem;

        dm_bufio_allocated_kmem_cache = 0;
        dm_bufio_allocated_get_free_pages = 0;
        dm_bufio_allocated_vmalloc = 0;
        dm_bufio_current_allocated = 0;

        mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
                               DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

        if (mem > ULONG_MAX)
                mem = ULONG_MAX;

#ifdef CONFIG_MMU
        if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
                mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

        dm_bufio_default_cache_size = mem;

        mutex_lock(&dm_bufio_clients_lock);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

        dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
        if (!dm_bufio_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
        INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
        queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);

        return 0;
}
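
/*
 * For illustration: with 8 GiB of non-highmem RAM,
 * DM_BUFIO_MEMORY_PERCENT yields a default cache limit of about
 * 164 MiB (2% of 8 GiB).  On CONFIG_MMU systems the limit is further
 * capped at DM_BUFIO_VMALLOC_PERCENT (25%) of the vmalloc address
 * space, which typically only bites on 32-bit machines where
 * VMALLOC_TOTAL is small.
 */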

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
        int bug = 0;

        cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
        flush_workqueue(dm_bufio_wq);
        destroy_workqueue(dm_bufio_wq);

        if (dm_bufio_client_count) {
                DMCRIT("%s: dm_bufio_client_count leaked: %d",
                        __func__, dm_bufio_client_count);
                bug = 1;
        }

        if (dm_bufio_current_allocated) {
                DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
                        __func__, dm_bufio_current_allocated);
                bug = 1;
        }

        if (dm_bufio_allocated_get_free_pages) {
                DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
                       __func__, dm_bufio_allocated_get_free_pages);
                bug = 1;
        }

        if (dm_bufio_allocated_vmalloc) {
                DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
                       __func__, dm_bufio_allocated_vmalloc);
                bug = 1;
        }

        BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
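
/*
 * These parameters are exposed under /sys/module/dm_bufio/parameters/.
 * Those marked S_IWUSR (for example max_cache_size_bytes) may be tuned
 * at run time by root; the allocation statistics are read-only.
 */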

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");