linux/drivers/md/dm-bufio.c
   1/*
   2 * Copyright (C) 2009-2011 Red Hat, Inc.
   3 *
   4 * Author: Mikulas Patocka <mpatocka@redhat.com>
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include <linux/dm-bufio.h>
  10
  11#include <linux/device-mapper.h>
  12#include <linux/dm-io.h>
  13#include <linux/slab.h>
  14#include <linux/sched/mm.h>
  15#include <linux/jiffies.h>
  16#include <linux/vmalloc.h>
  17#include <linux/shrinker.h>
  18#include <linux/module.h>
  19#include <linux/rbtree.h>
  20#include <linux/stacktrace.h>
  21
  22#define DM_MSG_PREFIX "bufio"
  23
  24/*
  25 * Memory management policy:
  26 *      Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
  27 *      or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
  28 *      Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
   29 *      Start background writeback when the number of dirty buffers
   30 *      exceeds DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
  31 */
  32#define DM_BUFIO_MIN_BUFFERS            8
  33
  34#define DM_BUFIO_MEMORY_PERCENT         2
  35#define DM_BUFIO_VMALLOC_PERCENT        25
  36#define DM_BUFIO_WRITEBACK_RATIO        3
  37#define DM_BUFIO_LOW_WATERMARK_RATIO    16
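
/*
 * A worked example of the defaults above (illustrative only): on a machine
 * with 16 GiB of RAM and a large vmalloc space, the cache is capped at
 * roughly DM_BUFIO_MEMORY_PERCENT (2%) of main memory, about 330 MiB.
 * With 100 buffers on the clean LRU, background writeback (see
 * __check_watermark) starts once more than
 * 100 * DM_BUFIO_WRITEBACK_RATIO = 300 buffers sit on the dirty LRU.
 */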
  38
  39/*
  40 * Check buffer ages in this interval (seconds)
  41 */
  42#define DM_BUFIO_WORK_TIMER_SECS        30
  43
  44/*
  45 * Free buffers when they are older than this (seconds)
  46 */
  47#define DM_BUFIO_DEFAULT_AGE_SECS       300
  48
  49/*
  50 * The nr of bytes of cached data to keep around.
  51 */
  52#define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
  53
  54/*
  55 * Align buffer writes to this boundary.
  56 * Tests show that SSDs have the highest IOPS when using 4k writes.
  57 */
  58#define DM_BUFIO_WRITE_ALIGN            4096
  59
  60/*
  61 * dm_buffer->list_mode
  62 */
  63#define LIST_CLEAN      0
  64#define LIST_DIRTY      1
  65#define LIST_SIZE       2
  66
  67/*
  68 * Linking of buffers:
  69 *      All buffers are linked to buffer_tree with their node field.
  70 *
  71 *      Clean buffers that are not being written (B_WRITING not set)
  72 *      are linked to lru[LIST_CLEAN] with their lru_list field.
  73 *
  74 *      Dirty and clean buffers that are being written are linked to
  75 *      lru[LIST_DIRTY] with their lru_list field. When the write
  76 *      finishes, the buffer cannot be relinked immediately (because we
  77 *      are in an interrupt context and relinking requires process
  78 *      context), so some clean-not-writing buffers can be held on
   79 *      lru[LIST_DIRTY] too.  They are later relinked to the clean list
   80 *      in the process context.
  81 */
  82struct dm_bufio_client {
  83        struct mutex lock;
  84
  85        struct list_head lru[LIST_SIZE];
  86        unsigned long n_buffers[LIST_SIZE];
  87
  88        struct block_device *bdev;
  89        unsigned block_size;
  90        s8 sectors_per_block_bits;
  91        void (*alloc_callback)(struct dm_buffer *);
  92        void (*write_callback)(struct dm_buffer *);
  93
  94        struct kmem_cache *slab_buffer;
  95        struct kmem_cache *slab_cache;
  96        struct dm_io_client *dm_io;
  97
  98        struct list_head reserved_buffers;
  99        unsigned need_reserved_buffers;
 100
 101        unsigned minimum_buffers;
 102
 103        struct rb_root buffer_tree;
 104        wait_queue_head_t free_buffer_wait;
 105
 106        sector_t start;
 107
 108        int async_write_error;
 109
 110        struct list_head client_list;
 111        struct shrinker shrinker;
 112};
 113
 114/*
 115 * Buffer state bits.
 116 */
 117#define B_READING       0
 118#define B_WRITING       1
 119#define B_DIRTY         2
 120
 121/*
 122 * Describes how the block was allocated:
 123 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 124 * See the comment at alloc_buffer_data.
 125 */
 126enum data_mode {
 127        DATA_MODE_SLAB = 0,
 128        DATA_MODE_GET_FREE_PAGES = 1,
 129        DATA_MODE_VMALLOC = 2,
 130        DATA_MODE_LIMIT = 3
 131};
 132
 133struct dm_buffer {
 134        struct rb_node node;
 135        struct list_head lru_list;
 136        struct list_head global_list;
 137        sector_t block;
 138        void *data;
 139        unsigned char data_mode;                /* DATA_MODE_* */
 140        unsigned char list_mode;                /* LIST_* */
 141        blk_status_t read_error;
 142        blk_status_t write_error;
 143        unsigned accessed;
 144        unsigned hold_count;
 145        unsigned long state;
 146        unsigned long last_accessed;
 147        unsigned dirty_start;
 148        unsigned dirty_end;
 149        unsigned write_start;
 150        unsigned write_end;
 151        struct dm_bufio_client *c;
 152        struct list_head write_list;
 153        void (*end_io)(struct dm_buffer *, blk_status_t);
 154#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 155#define MAX_STACK 10
 156        unsigned int stack_len;
 157        unsigned long stack_entries[MAX_STACK];
 158#endif
 159};
 160
 161/*----------------------------------------------------------------*/
 162
 163#define dm_bufio_in_request()   (!!current->bio_list)
 164
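/*
 * dm_bufio_in_request() is true when the caller is inside a device-mapper
 * request routine (a bio list is being built up on the current task).  Using
 * its value as the subclass for mutex_lock_nested() below tells lockdep that
 * taking a bufio client lock from within a request is a distinct, legitimate
 * nesting level rather than a recursive acquisition of the same lock class.
 */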
 165static void dm_bufio_lock(struct dm_bufio_client *c)
 166{
 167        mutex_lock_nested(&c->lock, dm_bufio_in_request());
 168}
 169
 170static int dm_bufio_trylock(struct dm_bufio_client *c)
 171{
 172        return mutex_trylock(&c->lock);
 173}
 174
 175static void dm_bufio_unlock(struct dm_bufio_client *c)
 176{
 177        mutex_unlock(&c->lock);
 178}
 179
 180/*----------------------------------------------------------------*/
 181
 182/*
 183 * Default cache size: available memory divided by the ratio.
 184 */
 185static unsigned long dm_bufio_default_cache_size;
 186
 187/*
 188 * Total cache size set by the user.
 189 */
 190static unsigned long dm_bufio_cache_size;
 191
 192/*
 193 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 194 * at any time.  If it disagrees, the user has changed cache size.
 195 */
 196static unsigned long dm_bufio_cache_size_latch;
 197
 198static DEFINE_SPINLOCK(global_spinlock);
 199
 200static LIST_HEAD(global_queue);
 201
 202static unsigned long global_num = 0;
 203
 204/*
 205 * Buffers are freed after this timeout
 206 */
 207static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
 208static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 209
 210static unsigned long dm_bufio_peak_allocated;
 211static unsigned long dm_bufio_allocated_kmem_cache;
 212static unsigned long dm_bufio_allocated_get_free_pages;
 213static unsigned long dm_bufio_allocated_vmalloc;
 214static unsigned long dm_bufio_current_allocated;
 215
 216/*----------------------------------------------------------------*/
 217
 218/*
 219 * The current number of clients.
 220 */
 221static int dm_bufio_client_count;
 222
 223/*
 224 * The list of all clients.
 225 */
 226static LIST_HEAD(dm_bufio_all_clients);
 227
 228/*
 229 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
 230 */
 231static DEFINE_MUTEX(dm_bufio_clients_lock);
 232
 233static struct workqueue_struct *dm_bufio_wq;
 234static struct delayed_work dm_bufio_cleanup_old_work;
 235static struct work_struct dm_bufio_replacement_work;
 236
 237
 238#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 239static void buffer_record_stack(struct dm_buffer *b)
 240{
 241        b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
 242}
 243#endif
 244
 245/*----------------------------------------------------------------
 246 * A red/black tree acts as an index for all the buffers.
 247 *--------------------------------------------------------------*/
 248static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
 249{
 250        struct rb_node *n = c->buffer_tree.rb_node;
 251        struct dm_buffer *b;
 252
 253        while (n) {
 254                b = container_of(n, struct dm_buffer, node);
 255
 256                if (b->block == block)
 257                        return b;
 258
 259                n = (b->block < block) ? n->rb_left : n->rb_right;
 260        }
 261
 262        return NULL;
 263}
 264
 265static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
 266{
 267        struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
 268        struct dm_buffer *found;
 269
 270        while (*new) {
 271                found = container_of(*new, struct dm_buffer, node);
 272
 273                if (found->block == b->block) {
 274                        BUG_ON(found != b);
 275                        return;
 276                }
 277
 278                parent = *new;
 279                new = (found->block < b->block) ?
 280                        &((*new)->rb_left) : &((*new)->rb_right);
 281        }
 282
 283        rb_link_node(&b->node, parent, new);
 284        rb_insert_color(&b->node, &c->buffer_tree);
 285}
 286
 287static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
 288{
 289        rb_erase(&b->node, &c->buffer_tree);
 290}
 291
 292/*----------------------------------------------------------------*/
 293
 294static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
 295{
 296        unsigned char data_mode;
 297        long diff;
 298
 299        static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
 300                &dm_bufio_allocated_kmem_cache,
 301                &dm_bufio_allocated_get_free_pages,
 302                &dm_bufio_allocated_vmalloc,
 303        };
 304
 305        data_mode = b->data_mode;
 306        diff = (long)b->c->block_size;
 307        if (unlink)
 308                diff = -diff;
 309
 310        spin_lock(&global_spinlock);
 311
 312        *class_ptr[data_mode] += diff;
 313
 314        dm_bufio_current_allocated += diff;
 315
 316        if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
 317                dm_bufio_peak_allocated = dm_bufio_current_allocated;
 318
 319        b->accessed = 1;
 320
 321        if (!unlink) {
 322                list_add(&b->global_list, &global_queue);
 323                global_num++;
 324                if (dm_bufio_current_allocated > dm_bufio_cache_size)
 325                        queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
 326        } else {
 327                list_del(&b->global_list);
 328                global_num--;
 329        }
 330
 331        spin_unlock(&global_spinlock);
 332}
 333
 334/*
 335 * Change the number of clients and recalculate per-client limit.
 336 */
 337static void __cache_size_refresh(void)
 338{
 339        BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
 340        BUG_ON(dm_bufio_client_count < 0);
 341
 342        dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
 343
 344        /*
 345         * Use default if set to 0 and report the actual cache size used.
 346         */
 347        if (!dm_bufio_cache_size_latch) {
 348                (void)cmpxchg(&dm_bufio_cache_size, 0,
 349                              dm_bufio_default_cache_size);
 350                dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
 351        }
 352}
 353
 354/*
 355 * Allocating buffer data.
 356 *
 357 * Small buffers are allocated with kmem_cache, to use space optimally.
 358 *
 359 * For large buffers, we choose between get_free_pages and vmalloc.
 360 * Each has advantages and disadvantages.
 361 *
 362 * __get_free_pages can randomly fail if the memory is fragmented.
 363 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 364 * as low as 128M) so using it for caching is not appropriate.
 365 *
 366 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 367 * won't have a fatal effect here, but it just causes flushes of some other
 368 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 369 * always fails (i.e. order >= MAX_ORDER).
 370 *
 371 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 372 * initial reserve allocation, so there's no risk of wasting all vmalloc
 373 * space.
 374 */
 375static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 376                               unsigned char *data_mode)
 377{
 378        if (unlikely(c->slab_cache != NULL)) {
 379                *data_mode = DATA_MODE_SLAB;
 380                return kmem_cache_alloc(c->slab_cache, gfp_mask);
 381        }
 382
 383        if (c->block_size <= KMALLOC_MAX_SIZE &&
 384            gfp_mask & __GFP_NORETRY) {
 385                *data_mode = DATA_MODE_GET_FREE_PAGES;
 386                return (void *)__get_free_pages(gfp_mask,
 387                                                c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
 388        }
 389
 390        *data_mode = DATA_MODE_VMALLOC;
 391
 392        /*
 393         * __vmalloc allocates the data pages and auxiliary structures with
 394         * gfp_flags that were specified, but pagetables are always allocated
 395         * with GFP_KERNEL, no matter what was specified as gfp_mask.
 396         *
 397         * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
 398         * all allocations done by this process (including pagetables) are done
 399         * as if GFP_NOIO was specified.
 400         */
 401        if (gfp_mask & __GFP_NORETRY) {
 402                unsigned noio_flag = memalloc_noio_save();
 403                void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 404
 405                memalloc_noio_restore(noio_flag);
 406                return ptr;
 407        }
 408
 409        return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 410}
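
/*
 * For example (illustrative only): with 4 KiB pages and a 64 KiB block size,
 * sectors_per_block_bits is 7, so the allocation order used above (and in
 * free_buffer_data below) is 7 - (PAGE_SHIFT - SECTOR_SHIFT) = 7 - 3 = 4,
 * i.e. 16 contiguous pages per buffer.
 */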
 411
 412/*
 413 * Free buffer's data.
 414 */
 415static void free_buffer_data(struct dm_bufio_client *c,
 416                             void *data, unsigned char data_mode)
 417{
 418        switch (data_mode) {
 419        case DATA_MODE_SLAB:
 420                kmem_cache_free(c->slab_cache, data);
 421                break;
 422
 423        case DATA_MODE_GET_FREE_PAGES:
 424                free_pages((unsigned long)data,
 425                           c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
 426                break;
 427
 428        case DATA_MODE_VMALLOC:
 429                vfree(data);
 430                break;
 431
 432        default:
 433                DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
 434                       data_mode);
 435                BUG();
 436        }
 437}
 438
 439/*
 440 * Allocate buffer and its data.
 441 */
 442static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
 443{
 444        struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
 445
 446        if (!b)
 447                return NULL;
 448
 449        b->c = c;
 450
 451        b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
 452        if (!b->data) {
 453                kmem_cache_free(c->slab_buffer, b);
 454                return NULL;
 455        }
 456
 457#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 458        b->stack_len = 0;
 459#endif
 460        return b;
 461}
 462
 463/*
 464 * Free buffer and its data.
 465 */
 466static void free_buffer(struct dm_buffer *b)
 467{
 468        struct dm_bufio_client *c = b->c;
 469
 470        free_buffer_data(c, b->data, b->data_mode);
 471        kmem_cache_free(c->slab_buffer, b);
 472}
 473
 474/*
 475 * Link buffer to the buffer tree and clean or dirty queue.
 476 */
 477static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
 478{
 479        struct dm_bufio_client *c = b->c;
 480
 481        c->n_buffers[dirty]++;
 482        b->block = block;
 483        b->list_mode = dirty;
 484        list_add(&b->lru_list, &c->lru[dirty]);
 485        __insert(b->c, b);
 486        b->last_accessed = jiffies;
 487
 488        adjust_total_allocated(b, false);
 489}
 490
 491/*
 492 * Unlink buffer from the buffer tree and dirty or clean queue.
 493 */
 494static void __unlink_buffer(struct dm_buffer *b)
 495{
 496        struct dm_bufio_client *c = b->c;
 497
 498        BUG_ON(!c->n_buffers[b->list_mode]);
 499
 500        c->n_buffers[b->list_mode]--;
 501        __remove(b->c, b);
 502        list_del(&b->lru_list);
 503
 504        adjust_total_allocated(b, true);
 505}
 506
 507/*
 508 * Place the buffer to the head of dirty or clean LRU queue.
 509 */
 510static void __relink_lru(struct dm_buffer *b, int dirty)
 511{
 512        struct dm_bufio_client *c = b->c;
 513
 514        b->accessed = 1;
 515
 516        BUG_ON(!c->n_buffers[b->list_mode]);
 517
 518        c->n_buffers[b->list_mode]--;
 519        c->n_buffers[dirty]++;
 520        b->list_mode = dirty;
 521        list_move(&b->lru_list, &c->lru[dirty]);
 522        b->last_accessed = jiffies;
 523}
 524
 525/*----------------------------------------------------------------
 526 * Submit I/O on the buffer.
 527 *
 528 * Bio interface is faster but it has some problems:
 529 *      the vector list is limited (increasing this limit increases
 530 *      memory-consumption per buffer, so it is not viable);
 531 *
 532 *      the memory must be direct-mapped, not vmalloced;
 533 *
  534 * If the buffer is not vmalloced and a single bio can describe it,
  535 * try using the bio interface.
  536 *
  537 * If the buffer is vmalloced, or if the bio cannot be allocated or filled
  538 * (bio_kmalloc or bio_add_page fails), fall back to the dm-io layer.
 539 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 540 * shortcomings.
 541 *--------------------------------------------------------------*/
 542
 543/*
 544 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 545 * that the request was handled directly with bio interface.
 546 */
 547static void dmio_complete(unsigned long error, void *context)
 548{
 549        struct dm_buffer *b = context;
 550
 551        b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
 552}
 553
 554static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 555                     unsigned n_sectors, unsigned offset)
 556{
 557        int r;
 558        struct dm_io_request io_req = {
 559                .bi_op = rw,
 560                .bi_op_flags = 0,
 561                .notify.fn = dmio_complete,
 562                .notify.context = b,
 563                .client = b->c->dm_io,
 564        };
 565        struct dm_io_region region = {
 566                .bdev = b->c->bdev,
 567                .sector = sector,
 568                .count = n_sectors,
 569        };
 570
 571        if (b->data_mode != DATA_MODE_VMALLOC) {
 572                io_req.mem.type = DM_IO_KMEM;
 573                io_req.mem.ptr.addr = (char *)b->data + offset;
 574        } else {
 575                io_req.mem.type = DM_IO_VMA;
 576                io_req.mem.ptr.vma = (char *)b->data + offset;
 577        }
 578
 579        r = dm_io(&io_req, 1, &region, NULL);
 580        if (unlikely(r))
 581                b->end_io(b, errno_to_blk_status(r));
 582}
 583
 584static void bio_complete(struct bio *bio)
 585{
 586        struct dm_buffer *b = bio->bi_private;
 587        blk_status_t status = bio->bi_status;
 588        bio_put(bio);
 589        b->end_io(b, status);
 590}
 591
 592static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
 593                    unsigned n_sectors, unsigned offset)
 594{
 595        struct bio *bio;
 596        char *ptr;
 597        unsigned vec_size, len;
 598
 599        vec_size = b->c->block_size >> PAGE_SHIFT;
 600        if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
 601                vec_size += 2;
 602
 603        bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
 604        if (!bio) {
 605dmio:
 606                use_dmio(b, rw, sector, n_sectors, offset);
 607                return;
 608        }
 609
 610        bio->bi_iter.bi_sector = sector;
 611        bio_set_dev(bio, b->c->bdev);
 612        bio_set_op_attrs(bio, rw, 0);
 613        bio->bi_end_io = bio_complete;
 614        bio->bi_private = b;
 615
 616        ptr = (char *)b->data + offset;
 617        len = n_sectors << SECTOR_SHIFT;
 618
 619        do {
 620                unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
 621                if (!bio_add_page(bio, virt_to_page(ptr), this_step,
 622                                  offset_in_page(ptr))) {
 623                        bio_put(bio);
 624                        goto dmio;
 625                }
 626
 627                len -= this_step;
 628                ptr += this_step;
 629        } while (len > 0);
 630
 631        submit_bio(bio);
 632}
 633
 634static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
 635{
 636        unsigned n_sectors;
 637        sector_t sector;
 638        unsigned offset, end;
 639
 640        b->end_io = end_io;
 641
 642        if (likely(b->c->sectors_per_block_bits >= 0))
 643                sector = b->block << b->c->sectors_per_block_bits;
 644        else
 645                sector = b->block * (b->c->block_size >> SECTOR_SHIFT);
 646        sector += b->c->start;
 647
 648        if (rw != REQ_OP_WRITE) {
 649                n_sectors = b->c->block_size >> SECTOR_SHIFT;
 650                offset = 0;
 651        } else {
 652                if (b->c->write_callback)
 653                        b->c->write_callback(b);
 654                offset = b->write_start;
 655                end = b->write_end;
 656                offset &= -DM_BUFIO_WRITE_ALIGN;
 657                end += DM_BUFIO_WRITE_ALIGN - 1;
 658                end &= -DM_BUFIO_WRITE_ALIGN;
 659                if (unlikely(end > b->c->block_size))
 660                        end = b->c->block_size;
 661
 662                sector += offset >> SECTOR_SHIFT;
 663                n_sectors = (end - offset) >> SECTOR_SHIFT;
 664        }
 665
 666        if (b->data_mode != DATA_MODE_VMALLOC)
 667                use_bio(b, rw, sector, n_sectors, offset);
 668        else
 669                use_dmio(b, rw, sector, n_sectors, offset);
 670}
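
/*
 * A worked example of the write-alignment arithmetic above (illustrative
 * only): with an 8 KiB block and a dirty byte range of [100, 5000), the
 * range is rounded out to DM_BUFIO_WRITE_ALIGN boundaries, giving
 * offset = 0 and end = 8192, so 16 sectors are written starting at the
 * block's first sector.
 */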
 671
 672/*----------------------------------------------------------------
 673 * Writing dirty buffers
 674 *--------------------------------------------------------------*/
 675
 676/*
 677 * The endio routine for write.
 678 *
 679 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 680 * it.
 681 */
 682static void write_endio(struct dm_buffer *b, blk_status_t status)
 683{
 684        b->write_error = status;
 685        if (unlikely(status)) {
 686                struct dm_bufio_client *c = b->c;
 687
 688                (void)cmpxchg(&c->async_write_error, 0,
 689                                blk_status_to_errno(status));
 690        }
 691
 692        BUG_ON(!test_bit(B_WRITING, &b->state));
 693
 694        smp_mb__before_atomic();
 695        clear_bit(B_WRITING, &b->state);
 696        smp_mb__after_atomic();
 697
 698        wake_up_bit(&b->state, B_WRITING);
 699}
 700
 701/*
 702 * Initiate a write on a dirty buffer, but don't wait for it.
 703 *
 704 * - If the buffer is not dirty, exit.
  705 * - If there is a previous write going on, wait for it to finish (we can't
 706 *   have two writes on the same buffer simultaneously).
 707 * - Submit our write and don't wait on it. We set B_WRITING indicating
 708 *   that there is a write in progress.
 709 */
 710static void __write_dirty_buffer(struct dm_buffer *b,
 711                                 struct list_head *write_list)
 712{
 713        if (!test_bit(B_DIRTY, &b->state))
 714                return;
 715
 716        clear_bit(B_DIRTY, &b->state);
 717        wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 718
 719        b->write_start = b->dirty_start;
 720        b->write_end = b->dirty_end;
 721
 722        if (!write_list)
 723                submit_io(b, REQ_OP_WRITE, write_endio);
 724        else
 725                list_add_tail(&b->write_list, write_list);
 726}
 727
 728static void __flush_write_list(struct list_head *write_list)
 729{
 730        struct blk_plug plug;
 731        blk_start_plug(&plug);
 732        while (!list_empty(write_list)) {
 733                struct dm_buffer *b =
 734                        list_entry(write_list->next, struct dm_buffer, write_list);
 735                list_del(&b->write_list);
 736                submit_io(b, REQ_OP_WRITE, write_endio);
 737                cond_resched();
 738        }
 739        blk_finish_plug(&plug);
 740}
 741
 742/*
 743 * Wait until any activity on the buffer finishes.  Possibly write the
 744 * buffer if it is dirty.  When this function finishes, there is no I/O
 745 * running on the buffer and the buffer is not dirty.
 746 */
 747static void __make_buffer_clean(struct dm_buffer *b)
 748{
 749        BUG_ON(b->hold_count);
 750
 751        if (!b->state)  /* fast case */
 752                return;
 753
 754        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 755        __write_dirty_buffer(b, NULL);
 756        wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 757}
 758
 759/*
 760 * Find some buffer that is not held by anybody, clean it, unlink it and
 761 * return it.
 762 */
 763static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
 764{
 765        struct dm_buffer *b;
 766
 767        list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
 768                BUG_ON(test_bit(B_WRITING, &b->state));
 769                BUG_ON(test_bit(B_DIRTY, &b->state));
 770
 771                if (!b->hold_count) {
 772                        __make_buffer_clean(b);
 773                        __unlink_buffer(b);
 774                        return b;
 775                }
 776                cond_resched();
 777        }
 778
 779        list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
 780                BUG_ON(test_bit(B_READING, &b->state));
 781
 782                if (!b->hold_count) {
 783                        __make_buffer_clean(b);
 784                        __unlink_buffer(b);
 785                        return b;
 786                }
 787                cond_resched();
 788        }
 789
 790        return NULL;
 791}
 792
 793/*
  794 * Wait until some other thread frees a buffer or releases its hold count
  795 * on a buffer.
 796 *
 797 * This function is entered with c->lock held, drops it and regains it
 798 * before exiting.
 799 */
 800static void __wait_for_free_buffer(struct dm_bufio_client *c)
 801{
 802        DECLARE_WAITQUEUE(wait, current);
 803
 804        add_wait_queue(&c->free_buffer_wait, &wait);
 805        set_current_state(TASK_UNINTERRUPTIBLE);
 806        dm_bufio_unlock(c);
 807
 808        io_schedule();
 809
 810        remove_wait_queue(&c->free_buffer_wait, &wait);
 811
 812        dm_bufio_lock(c);
 813}
 814
 815enum new_flag {
 816        NF_FRESH = 0,
 817        NF_READ = 1,
 818        NF_GET = 2,
 819        NF_PREFETCH = 3
 820};
 821
 822/*
 823 * Allocate a new buffer. If the allocation is not possible, wait until
 824 * some other thread frees a buffer.
 825 *
 826 * May drop the lock and regain it.
 827 */
 828static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
 829{
 830        struct dm_buffer *b;
 831        bool tried_noio_alloc = false;
 832
 833        /*
 834         * dm-bufio is resistant to allocation failures (it just keeps
  835 * one buffer reserved in case all the allocations fail).
 836         * So set flags to not try too hard:
 837         *      GFP_NOWAIT: don't wait; if we need to sleep we'll release our
 838         *                  mutex and wait ourselves.
 839         *      __GFP_NORETRY: don't retry and rather return failure
 840         *      __GFP_NOMEMALLOC: don't use emergency reserves
 841         *      __GFP_NOWARN: don't print a warning in case of failure
 842         *
 843         * For debugging, if we set the cache size to 1, no new buffers will
 844         * be allocated.
 845         */
 846        while (1) {
 847                if (dm_bufio_cache_size_latch != 1) {
 848                        b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 849                        if (b)
 850                                return b;
 851                }
 852
 853                if (nf == NF_PREFETCH)
 854                        return NULL;
 855
 856                if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
 857                        dm_bufio_unlock(c);
 858                        b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 859                        dm_bufio_lock(c);
 860                        if (b)
 861                                return b;
 862                        tried_noio_alloc = true;
 863                }
 864
 865                if (!list_empty(&c->reserved_buffers)) {
 866                        b = list_entry(c->reserved_buffers.next,
 867                                       struct dm_buffer, lru_list);
 868                        list_del(&b->lru_list);
 869                        c->need_reserved_buffers++;
 870
 871                        return b;
 872                }
 873
 874                b = __get_unclaimed_buffer(c);
 875                if (b)
 876                        return b;
 877
 878                __wait_for_free_buffer(c);
 879        }
 880}
 881
 882static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
 883{
 884        struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
 885
 886        if (!b)
 887                return NULL;
 888
 889        if (c->alloc_callback)
 890                c->alloc_callback(b);
 891
 892        return b;
 893}
 894
 895/*
 896 * Free a buffer and wake other threads waiting for free buffers.
 897 */
 898static void __free_buffer_wake(struct dm_buffer *b)
 899{
 900        struct dm_bufio_client *c = b->c;
 901
 902        if (!c->need_reserved_buffers)
 903                free_buffer(b);
 904        else {
 905                list_add(&b->lru_list, &c->reserved_buffers);
 906                c->need_reserved_buffers--;
 907        }
 908
 909        wake_up(&c->free_buffer_wait);
 910}
 911
 912static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
 913                                        struct list_head *write_list)
 914{
 915        struct dm_buffer *b, *tmp;
 916
 917        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
 918                BUG_ON(test_bit(B_READING, &b->state));
 919
 920                if (!test_bit(B_DIRTY, &b->state) &&
 921                    !test_bit(B_WRITING, &b->state)) {
 922                        __relink_lru(b, LIST_CLEAN);
 923                        continue;
 924                }
 925
 926                if (no_wait && test_bit(B_WRITING, &b->state))
 927                        return;
 928
 929                __write_dirty_buffer(b, write_list);
 930                cond_resched();
 931        }
 932}
 933
 934/*
  935 * Check if we're over the writeback watermark.
  936 * If dirty buffers outnumber clean buffers by more than
  937 * DM_BUFIO_WRITEBACK_RATIO to one, start writing them back asynchronously.
 938 */
 939static void __check_watermark(struct dm_bufio_client *c,
 940                              struct list_head *write_list)
 941{
 942        if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
 943                __write_dirty_buffers_async(c, 1, write_list);
 944}
 945
 946/*----------------------------------------------------------------
 947 * Getting a buffer
 948 *--------------------------------------------------------------*/
 949
 950static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
 951                                     enum new_flag nf, int *need_submit,
 952                                     struct list_head *write_list)
 953{
 954        struct dm_buffer *b, *new_b = NULL;
 955
 956        *need_submit = 0;
 957
 958        b = __find(c, block);
 959        if (b)
 960                goto found_buffer;
 961
 962        if (nf == NF_GET)
 963                return NULL;
 964
 965        new_b = __alloc_buffer_wait(c, nf);
 966        if (!new_b)
 967                return NULL;
 968
 969        /*
 970         * We've had a period where the mutex was unlocked, so need to
 971         * recheck the buffer tree.
 972         */
 973        b = __find(c, block);
 974        if (b) {
 975                __free_buffer_wake(new_b);
 976                goto found_buffer;
 977        }
 978
 979        __check_watermark(c, write_list);
 980
 981        b = new_b;
 982        b->hold_count = 1;
 983        b->read_error = 0;
 984        b->write_error = 0;
 985        __link_buffer(b, block, LIST_CLEAN);
 986
 987        if (nf == NF_FRESH) {
 988                b->state = 0;
 989                return b;
 990        }
 991
 992        b->state = 1 << B_READING;
 993        *need_submit = 1;
 994
 995        return b;
 996
 997found_buffer:
 998        if (nf == NF_PREFETCH)
 999                return NULL;
1000        /*
1001         * Note: it is essential that we don't wait for the buffer to be
1002         * read if dm_bufio_get function is used. Both dm_bufio_get and
1003         * dm_bufio_prefetch can be used in the driver request routine.
1004         * If the user called both dm_bufio_prefetch and dm_bufio_get on
1005         * the same buffer, it would deadlock if we waited.
1006         */
1007        if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1008                return NULL;
1009
1010        b->hold_count++;
1011        __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1012                     test_bit(B_WRITING, &b->state));
1013        return b;
1014}
1015
1016/*
1017 * The endio routine for reading: set the error, clear the bit and wake up
1018 * anyone waiting on the buffer.
1019 */
1020static void read_endio(struct dm_buffer *b, blk_status_t status)
1021{
1022        b->read_error = status;
1023
1024        BUG_ON(!test_bit(B_READING, &b->state));
1025
1026        smp_mb__before_atomic();
1027        clear_bit(B_READING, &b->state);
1028        smp_mb__after_atomic();
1029
1030        wake_up_bit(&b->state, B_READING);
1031}
1032
1033/*
1034 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1035 * functions is similar except that dm_bufio_new doesn't read the
1036 * buffer from the disk (assuming that the caller overwrites all the data
1037 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1038 */
1039static void *new_read(struct dm_bufio_client *c, sector_t block,
1040                      enum new_flag nf, struct dm_buffer **bp)
1041{
1042        int need_submit;
1043        struct dm_buffer *b;
1044
1045        LIST_HEAD(write_list);
1046
1047        dm_bufio_lock(c);
1048        b = __bufio_new(c, block, nf, &need_submit, &write_list);
1049#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1050        if (b && b->hold_count == 1)
1051                buffer_record_stack(b);
1052#endif
1053        dm_bufio_unlock(c);
1054
1055        __flush_write_list(&write_list);
1056
1057        if (!b)
1058                return NULL;
1059
1060        if (need_submit)
1061                submit_io(b, REQ_OP_READ, read_endio);
1062
1063        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1064
1065        if (b->read_error) {
1066                int error = blk_status_to_errno(b->read_error);
1067
1068                dm_bufio_release(b);
1069
1070                return ERR_PTR(error);
1071        }
1072
1073        *bp = b;
1074
1075        return b->data;
1076}
1077
1078void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1079                   struct dm_buffer **bp)
1080{
1081        return new_read(c, block, NF_GET, bp);
1082}
1083EXPORT_SYMBOL_GPL(dm_bufio_get);
1084
1085void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1086                    struct dm_buffer **bp)
1087{
1088        BUG_ON(dm_bufio_in_request());
1089
1090        return new_read(c, block, NF_READ, bp);
1091}
1092EXPORT_SYMBOL_GPL(dm_bufio_read);
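
/*
 * A minimal usage sketch (hypothetical caller; "c", "block" and "payload"
 * are placeholder names, not part of this file):
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(c, block, &bp);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);	/* read error; buffer already released */
 *	memcpy(data, payload, 512);
 *	dm_bufio_mark_partial_buffer_dirty(bp, 0, 512);
 *	dm_bufio_release(bp);		/* drop hold_count; data may stay cached */
 */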
1093
1094void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1095                   struct dm_buffer **bp)
1096{
1097        BUG_ON(dm_bufio_in_request());
1098
1099        return new_read(c, block, NF_FRESH, bp);
1100}
1101EXPORT_SYMBOL_GPL(dm_bufio_new);
1102
1103void dm_bufio_prefetch(struct dm_bufio_client *c,
1104                       sector_t block, unsigned n_blocks)
1105{
1106        struct blk_plug plug;
1107
1108        LIST_HEAD(write_list);
1109
1110        BUG_ON(dm_bufio_in_request());
1111
1112        blk_start_plug(&plug);
1113        dm_bufio_lock(c);
1114
1115        for (; n_blocks--; block++) {
1116                int need_submit;
1117                struct dm_buffer *b;
1118                b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1119                                &write_list);
1120                if (unlikely(!list_empty(&write_list))) {
1121                        dm_bufio_unlock(c);
1122                        blk_finish_plug(&plug);
1123                        __flush_write_list(&write_list);
1124                        blk_start_plug(&plug);
1125                        dm_bufio_lock(c);
1126                }
1127                if (unlikely(b != NULL)) {
1128                        dm_bufio_unlock(c);
1129
1130                        if (need_submit)
1131                                submit_io(b, REQ_OP_READ, read_endio);
1132                        dm_bufio_release(b);
1133
1134                        cond_resched();
1135
1136                        if (!n_blocks)
1137                                goto flush_plug;
1138                        dm_bufio_lock(c);
1139                }
1140        }
1141
1142        dm_bufio_unlock(c);
1143
1144flush_plug:
1145        blk_finish_plug(&plug);
1146}
1147EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
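
/*
 * A minimal prefetch sketch (hypothetical caller): start reads for a range
 * of blocks without holding them; a later dm_bufio_read() of the same blocks
 * then finds the data cached or already in flight.
 *
 *	dm_bufio_prefetch(c, first, 16);
 *	...
 *	data = dm_bufio_read(c, first, &bp);
 */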
1148
1149void dm_bufio_release(struct dm_buffer *b)
1150{
1151        struct dm_bufio_client *c = b->c;
1152
1153        dm_bufio_lock(c);
1154
1155        BUG_ON(!b->hold_count);
1156
1157        b->hold_count--;
1158        if (!b->hold_count) {
1159                wake_up(&c->free_buffer_wait);
1160
1161                /*
1162                 * If there were errors on the buffer, and the buffer is not
1163                 * to be written, free the buffer. There is no point in caching
 1164                 * an invalid buffer.
1165                 */
1166                if ((b->read_error || b->write_error) &&
1167                    !test_bit(B_READING, &b->state) &&
1168                    !test_bit(B_WRITING, &b->state) &&
1169                    !test_bit(B_DIRTY, &b->state)) {
1170                        __unlink_buffer(b);
1171                        __free_buffer_wake(b);
1172                }
1173        }
1174
1175        dm_bufio_unlock(c);
1176}
1177EXPORT_SYMBOL_GPL(dm_bufio_release);
1178
1179void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
1180                                        unsigned start, unsigned end)
1181{
1182        struct dm_bufio_client *c = b->c;
1183
1184        BUG_ON(start >= end);
1185        BUG_ON(end > b->c->block_size);
1186
1187        dm_bufio_lock(c);
1188
1189        BUG_ON(test_bit(B_READING, &b->state));
1190
1191        if (!test_and_set_bit(B_DIRTY, &b->state)) {
1192                b->dirty_start = start;
1193                b->dirty_end = end;
1194                __relink_lru(b, LIST_DIRTY);
1195        } else {
1196                if (start < b->dirty_start)
1197                        b->dirty_start = start;
1198                if (end > b->dirty_end)
1199                        b->dirty_end = end;
1200        }
1201
1202        dm_bufio_unlock(c);
1203}
1204EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
1205
1206void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1207{
1208        dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
1209}
1210EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1211
1212void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1213{
1214        LIST_HEAD(write_list);
1215
1216        BUG_ON(dm_bufio_in_request());
1217
1218        dm_bufio_lock(c);
1219        __write_dirty_buffers_async(c, 0, &write_list);
1220        dm_bufio_unlock(c);
1221        __flush_write_list(&write_list);
1222}
1223EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1224
1225/*
1226 * For performance, it is essential that the buffers are written asynchronously
1227 * and simultaneously (so that the block layer can merge the writes) and then
1228 * waited upon.
1229 *
1230 * Finally, we flush hardware disk cache.
1231 */
1232int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1233{
1234        int a, f;
1235        unsigned long buffers_processed = 0;
1236        struct dm_buffer *b, *tmp;
1237
1238        LIST_HEAD(write_list);
1239
1240        dm_bufio_lock(c);
1241        __write_dirty_buffers_async(c, 0, &write_list);
1242        dm_bufio_unlock(c);
1243        __flush_write_list(&write_list);
1244        dm_bufio_lock(c);
1245
1246again:
1247        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1248                int dropped_lock = 0;
1249
1250                if (buffers_processed < c->n_buffers[LIST_DIRTY])
1251                        buffers_processed++;
1252
1253                BUG_ON(test_bit(B_READING, &b->state));
1254
1255                if (test_bit(B_WRITING, &b->state)) {
1256                        if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1257                                dropped_lock = 1;
1258                                b->hold_count++;
1259                                dm_bufio_unlock(c);
1260                                wait_on_bit_io(&b->state, B_WRITING,
1261                                               TASK_UNINTERRUPTIBLE);
1262                                dm_bufio_lock(c);
1263                                b->hold_count--;
1264                        } else
1265                                wait_on_bit_io(&b->state, B_WRITING,
1266                                               TASK_UNINTERRUPTIBLE);
1267                }
1268
1269                if (!test_bit(B_DIRTY, &b->state) &&
1270                    !test_bit(B_WRITING, &b->state))
1271                        __relink_lru(b, LIST_CLEAN);
1272
1273                cond_resched();
1274
1275                /*
1276                 * If we dropped the lock, the list is no longer consistent,
1277                 * so we must restart the search.
1278                 *
1279                 * In the most common case, the buffer just processed is
1280                 * relinked to the clean list, so we won't loop scanning the
1281                 * same buffer again and again.
1282                 *
1283                 * This may livelock if there is another thread simultaneously
1284                 * dirtying buffers, so we count the number of buffers walked
1285                 * and if it exceeds the total number of buffers, it means that
1286                 * someone is doing some writes simultaneously with us.  In
1287                 * this case, stop, dropping the lock.
1288                 */
1289                if (dropped_lock)
1290                        goto again;
1291        }
1292        wake_up(&c->free_buffer_wait);
1293        dm_bufio_unlock(c);
1294
1295        a = xchg(&c->async_write_error, 0);
1296        f = dm_bufio_issue_flush(c);
1297        if (a)
1298                return a;
1299
1300        return f;
1301}
1302EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
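
/*
 * A typical commit sequence (hypothetical caller): dirty buffers are marked
 * and released first, then made durable in one call, since
 * dm_bufio_write_dirty_buffers() writes them out, waits for completion and
 * finishes with a cache flush via dm_bufio_issue_flush().
 *
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 *	r = dm_bufio_write_dirty_buffers(c);
 *	if (r)
 *		goto fail_commit;
 */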
1303
1304/*
1305 * Use dm-io to send an empty barrier to flush the device.
1306 */
1307int dm_bufio_issue_flush(struct dm_bufio_client *c)
1308{
1309        struct dm_io_request io_req = {
1310                .bi_op = REQ_OP_WRITE,
1311                .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
1312                .mem.type = DM_IO_KMEM,
1313                .mem.ptr.addr = NULL,
1314                .client = c->dm_io,
1315        };
1316        struct dm_io_region io_reg = {
1317                .bdev = c->bdev,
1318                .sector = 0,
1319                .count = 0,
1320        };
1321
1322        BUG_ON(dm_bufio_in_request());
1323
1324        return dm_io(&io_req, 1, &io_reg, NULL);
1325}
1326EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1327
1328/*
1329 * We first delete any other buffer that may be at that new location.
1330 *
1331 * Then, we write the buffer to the original location if it was dirty.
1332 *
1333 * Then, if we are the only one who is holding the buffer, relink the buffer
1334 * in the buffer tree for the new location.
1335 *
1336 * If there was someone else holding the buffer, we write it to the new
1337 * location but not relink it, because that other user needs to have the buffer
1338 * at the same place.
1339 */
1340void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1341{
1342        struct dm_bufio_client *c = b->c;
1343        struct dm_buffer *new;
1344
1345        BUG_ON(dm_bufio_in_request());
1346
1347        dm_bufio_lock(c);
1348
1349retry:
1350        new = __find(c, new_block);
1351        if (new) {
1352                if (new->hold_count) {
1353                        __wait_for_free_buffer(c);
1354                        goto retry;
1355                }
1356
1357                /*
1358                 * FIXME: Is there any point waiting for a write that's going
1359                 * to be overwritten in a bit?
1360                 */
1361                __make_buffer_clean(new);
1362                __unlink_buffer(new);
1363                __free_buffer_wake(new);
1364        }
1365
1366        BUG_ON(!b->hold_count);
1367        BUG_ON(test_bit(B_READING, &b->state));
1368
1369        __write_dirty_buffer(b, NULL);
1370        if (b->hold_count == 1) {
1371                wait_on_bit_io(&b->state, B_WRITING,
1372                               TASK_UNINTERRUPTIBLE);
1373                set_bit(B_DIRTY, &b->state);
1374                b->dirty_start = 0;
1375                b->dirty_end = c->block_size;
1376                __unlink_buffer(b);
1377                __link_buffer(b, new_block, LIST_DIRTY);
1378        } else {
1379                sector_t old_block;
1380                wait_on_bit_lock_io(&b->state, B_WRITING,
1381                                    TASK_UNINTERRUPTIBLE);
1382                /*
1383                 * Relink buffer to "new_block" so that write_callback
1384                 * sees "new_block" as a block number.
1385                 * After the write, link the buffer back to old_block.
1386                 * All this must be done in bufio lock, so that block number
1387                 * change isn't visible to other threads.
1388                 */
1389                old_block = b->block;
1390                __unlink_buffer(b);
1391                __link_buffer(b, new_block, b->list_mode);
1392                submit_io(b, REQ_OP_WRITE, write_endio);
1393                wait_on_bit_io(&b->state, B_WRITING,
1394                               TASK_UNINTERRUPTIBLE);
1395                __unlink_buffer(b);
1396                __link_buffer(b, old_block, b->list_mode);
1397        }
1398
1399        dm_bufio_unlock(c);
1400        dm_bufio_release(b);
1401}
1402EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1403
1404/*
1405 * Free the given buffer.
1406 *
 1407 * This is just a hint: if the buffer is in use or dirty, this function
1408 * does nothing.
1409 */
1410void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1411{
1412        struct dm_buffer *b;
1413
1414        dm_bufio_lock(c);
1415
1416        b = __find(c, block);
1417        if (b && likely(!b->hold_count) && likely(!b->state)) {
1418                __unlink_buffer(b);
1419                __free_buffer_wake(b);
1420        }
1421
1422        dm_bufio_unlock(c);
1423}
1424EXPORT_SYMBOL_GPL(dm_bufio_forget);
1425
1426void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1427{
1428        c->minimum_buffers = n;
1429}
1430EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
1431
1432unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1433{
1434        return c->block_size;
1435}
1436EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1437
1438sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1439{
1440        sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
1441        if (likely(c->sectors_per_block_bits >= 0))
1442                s >>= c->sectors_per_block_bits;
1443        else
1444                sector_div(s, c->block_size >> SECTOR_SHIFT);
1445        return s;
1446}
1447EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1448
1449sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1450{
1451        return b->block;
1452}
1453EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1454
1455void *dm_bufio_get_block_data(struct dm_buffer *b)
1456{
1457        return b->data;
1458}
1459EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1460
1461void *dm_bufio_get_aux_data(struct dm_buffer *b)
1462{
1463        return b + 1;
1464}
1465EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
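
/*
 * The auxiliary data lives immediately after struct dm_buffer and is sized
 * by the aux_size passed to dm_bufio_client_create().  For example
 * (hypothetical caller, with aux_size == sizeof(struct my_aux)):
 *
 *	struct my_aux *aux = dm_bufio_get_aux_data(bp);
 */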
1466
1467struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1468{
1469        return b->c;
1470}
1471EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1472
1473static void drop_buffers(struct dm_bufio_client *c)
1474{
1475        struct dm_buffer *b;
1476        int i;
1477        bool warned = false;
1478
1479        BUG_ON(dm_bufio_in_request());
1480
1481        /*
1482         * An optimization so that the buffers are not written one-by-one.
1483         */
1484        dm_bufio_write_dirty_buffers_async(c);
1485
1486        dm_bufio_lock(c);
1487
1488        while ((b = __get_unclaimed_buffer(c)))
1489                __free_buffer_wake(b);
1490
1491        for (i = 0; i < LIST_SIZE; i++)
1492                list_for_each_entry(b, &c->lru[i], lru_list) {
1493                        WARN_ON(!warned);
1494                        warned = true;
1495                        DMERR("leaked buffer %llx, hold count %u, list %d",
1496                              (unsigned long long)b->block, b->hold_count, i);
1497#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1498                        stack_trace_print(b->stack_entries, b->stack_len, 1);
1499                        /* mark unclaimed to avoid BUG_ON below */
1500                        b->hold_count = 0;
1501#endif
1502                }
1503
1504#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1505        while ((b = __get_unclaimed_buffer(c)))
1506                __free_buffer_wake(b);
1507#endif
1508
1509        for (i = 0; i < LIST_SIZE; i++)
1510                BUG_ON(!list_empty(&c->lru[i]));
1511
1512        dm_bufio_unlock(c);
1513}
1514
1515/*
 1516 * We may not be able to evict this buffer if I/O is pending or the client
 1517 * is still using it.  The caller is expected to know the buffer is too old.
 1518 *
 1519 * And if GFP_NOFS is used, we must not do any I/O because we hold
 1520 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 1521 * rerouted to a different bufio client.
1522 */
1523static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1524{
1525        if (!(gfp & __GFP_FS)) {
1526                if (test_bit(B_READING, &b->state) ||
1527                    test_bit(B_WRITING, &b->state) ||
1528                    test_bit(B_DIRTY, &b->state))
1529                        return false;
1530        }
1531
1532        if (b->hold_count)
1533                return false;
1534
1535        __make_buffer_clean(b);
1536        __unlink_buffer(b);
1537        __free_buffer_wake(b);
1538
1539        return true;
1540}
1541
1542static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1543{
1544        unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
1545        if (likely(c->sectors_per_block_bits >= 0))
1546                retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
1547        else
1548                retain_bytes /= c->block_size;
1549        return retain_bytes;
1550}
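
/*
 * For example (illustrative only): with the default dm_bufio_retain_bytes of
 * 256 KiB and a 4 KiB block size, sectors_per_block_bits is 3, so the
 * shrinker retains 256 KiB >> (3 + SECTOR_SHIFT) = 64 buffers per client.
 */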
1551
1552static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1553                            gfp_t gfp_mask)
1554{
1555        int l;
1556        struct dm_buffer *b, *tmp;
1557        unsigned long freed = 0;
1558        unsigned long count = c->n_buffers[LIST_CLEAN] +
1559                              c->n_buffers[LIST_DIRTY];
1560        unsigned long retain_target = get_retain_buffers(c);
1561
1562        for (l = 0; l < LIST_SIZE; l++) {
1563                list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1564                        if (__try_evict_buffer(b, gfp_mask))
1565                                freed++;
1566                        if (!--nr_to_scan || ((count - freed) <= retain_target))
1567                                return freed;
1568                        cond_resched();
1569                }
1570        }
1571        return freed;
1572}
1573
1574static unsigned long
1575dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1576{
1577        struct dm_bufio_client *c;
1578        unsigned long freed;
1579
1580        c = container_of(shrink, struct dm_bufio_client, shrinker);
1581        if (sc->gfp_mask & __GFP_FS)
1582                dm_bufio_lock(c);
1583        else if (!dm_bufio_trylock(c))
1584                return SHRINK_STOP;
1585
1586        freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1587        dm_bufio_unlock(c);
1588        return freed;
1589}
1590
1591static unsigned long
1592dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1593{
1594        struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1595        unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1596                              READ_ONCE(c->n_buffers[LIST_DIRTY]);
1597        unsigned long retain_target = get_retain_buffers(c);
1598
1599        return (count < retain_target) ? 0 : (count - retain_target);
1600}
1601
1602/*
1603 * Create the buffering interface
1604 */
1605struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1606                                               unsigned reserved_buffers, unsigned aux_size,
1607                                               void (*alloc_callback)(struct dm_buffer *),
1608                                               void (*write_callback)(struct dm_buffer *))
1609{
1610        int r;
1611        struct dm_bufio_client *c;
1612        unsigned i;
1613        char slab_name[27];
1614
1615        if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
1616                DMERR("%s: block size not specified or is not multiple of 512b", __func__);
1617                r = -EINVAL;
1618                goto bad_client;
1619        }
1620
1621        c = kzalloc(sizeof(*c), GFP_KERNEL);
1622        if (!c) {
1623                r = -ENOMEM;
1624                goto bad_client;
1625        }
1626        c->buffer_tree = RB_ROOT;
1627
1628        c->bdev = bdev;
1629        c->block_size = block_size;
1630        if (is_power_of_2(block_size))
1631                c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1632        else
1633                c->sectors_per_block_bits = -1;
1634
1635        c->alloc_callback = alloc_callback;
1636        c->write_callback = write_callback;
1637
1638        for (i = 0; i < LIST_SIZE; i++) {
1639                INIT_LIST_HEAD(&c->lru[i]);
1640                c->n_buffers[i] = 0;
1641        }
1642
1643        mutex_init(&c->lock);
1644        INIT_LIST_HEAD(&c->reserved_buffers);
1645        c->need_reserved_buffers = reserved_buffers;
1646
1647        dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
1648
1649        init_waitqueue_head(&c->free_buffer_wait);
1650        c->async_write_error = 0;
1651
1652        c->dm_io = dm_io_client_create();
1653        if (IS_ERR(c->dm_io)) {
1654                r = PTR_ERR(c->dm_io);
1655                goto bad_dm_io;
1656        }
1657
1658        if (block_size <= KMALLOC_MAX_SIZE &&
1659            (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
1660                unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
1661                snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
1662                c->slab_cache = kmem_cache_create(slab_name, block_size, align,
1663                                                  SLAB_RECLAIM_ACCOUNT, NULL);
1664                if (!c->slab_cache) {
1665                        r = -ENOMEM;
1666                        goto bad;
1667                }
1668        }
1669        if (aux_size)
1670                snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
1671        else
1672                snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
1673        c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
1674                                           0, SLAB_RECLAIM_ACCOUNT, NULL);
1675        if (!c->slab_buffer) {
1676                r = -ENOMEM;
1677                goto bad;
1678        }
1679
1680        while (c->need_reserved_buffers) {
1681                struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1682
1683                if (!b) {
1684                        r = -ENOMEM;
1685                        goto bad;
1686                }
1687                __free_buffer_wake(b);
1688        }
1689
1690        c->shrinker.count_objects = dm_bufio_shrink_count;
1691        c->shrinker.scan_objects = dm_bufio_shrink_scan;
1692        c->shrinker.seeks = 1;
1693        c->shrinker.batch = 0;
1694        r = register_shrinker(&c->shrinker);
1695        if (r)
1696                goto bad;
1697
1698        mutex_lock(&dm_bufio_clients_lock);
1699        dm_bufio_client_count++;
1700        list_add(&c->client_list, &dm_bufio_all_clients);
1701        __cache_size_refresh();
1702        mutex_unlock(&dm_bufio_clients_lock);
1703
1704        return c;
1705
1706bad:
1707        while (!list_empty(&c->reserved_buffers)) {
1708                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1709                                                 struct dm_buffer, lru_list);
1710                list_del(&b->lru_list);
1711                free_buffer(b);
1712        }
1713        kmem_cache_destroy(c->slab_cache);
1714        kmem_cache_destroy(c->slab_buffer);
1715        dm_io_client_destroy(c->dm_io);
1716bad_dm_io:
1717        mutex_destroy(&c->lock);
1718        kfree(c);
1719bad_client:
1720        return ERR_PTR(r);
1721}
1722EXPORT_SYMBOL_GPL(dm_bufio_client_create);
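/*
 * Illustrative sketch (not part of this file): a device-mapper target
 * typically creates a client for its metadata device like this, here
 * assuming 4KiB blocks, one reserved buffer and no aux data or callbacks:
 *
 *	struct dm_bufio_client *bc;
 *
 *	bc = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(bc))
 *		return PTR_ERR(bc);
 */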
1723
1724/*
1725 * Free the buffering interface.
1726 * The caller must ensure there are no outstanding references to any buffers.
1727 */
1728void dm_bufio_client_destroy(struct dm_bufio_client *c)
1729{
1730        unsigned i;
1731
1732        drop_buffers(c);
1733
1734        unregister_shrinker(&c->shrinker);
1735
1736        mutex_lock(&dm_bufio_clients_lock);
1737
1738        list_del(&c->client_list);
1739        dm_bufio_client_count--;
1740        __cache_size_refresh();
1741
1742        mutex_unlock(&dm_bufio_clients_lock);
1743
1744        BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1745        BUG_ON(c->need_reserved_buffers);
1746
1747        while (!list_empty(&c->reserved_buffers)) {
1748                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1749                                                 struct dm_buffer, lru_list);
1750                list_del(&b->lru_list);
1751                free_buffer(b);
1752        }
1753
1754        for (i = 0; i < LIST_SIZE; i++)
1755                if (c->n_buffers[i])
1756                        DMERR("leaked buffer count %u: %lu", i, c->n_buffers[i]);
1757
1758        for (i = 0; i < LIST_SIZE; i++)
1759                BUG_ON(c->n_buffers[i]);
1760
1761        kmem_cache_destroy(c->slab_cache);
1762        kmem_cache_destroy(c->slab_buffer);
1763        dm_io_client_destroy(c->dm_io);
1764        mutex_destroy(&c->lock);
1765        kfree(c);
1766}
1767EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1768
1769void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1770{
1771        c->start = start;
1772}
1773EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
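/*
 * Illustrative sketch (not part of this file): a target whose bufio-managed
 * data does not start at sector 0 of the underlying device can shift all
 * buffer I/O by a fixed offset, e.g.:
 *
 *	dm_bufio_set_sector_offset(bc, data_start_sector);
 */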
1774
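/*
 * Convert the max_age module parameter from seconds to jiffies, clamping it
 * so that the multiplication by HZ cannot overflow an unsigned int.
 */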
1775static unsigned get_max_age_hz(void)
1776{
1777        unsigned max_age = READ_ONCE(dm_bufio_max_age);
1778
1779        if (max_age > UINT_MAX / HZ)
1780                max_age = UINT_MAX / HZ;
1781
1782        return max_age * HZ;
1783}
1784
1785static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1786{
1787        return time_after_eq(jiffies, b->last_accessed + age_hz);
1788}
1789
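/*
 * Write out dirty buffers above the watermark, then walk the clean LRU from
 * its oldest end and evict buffers that have not been accessed for age_hz
 * jiffies, while keeping at least retain_target buffers cached.
 */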
1790static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1791{
1792        struct dm_buffer *b, *tmp;
1793        unsigned long retain_target = get_retain_buffers(c);
1794        unsigned long count;
1795        LIST_HEAD(write_list);
1796
1797        dm_bufio_lock(c);
1798
1799        __check_watermark(c, &write_list);
1800        if (unlikely(!list_empty(&write_list))) {
1801                dm_bufio_unlock(c);
1802                __flush_write_list(&write_list);
1803                dm_bufio_lock(c);
1804        }
1805
1806        count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1807        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1808                if (count <= retain_target)
1809                        break;
1810
1811                if (!older_than(b, age_hz))
1812                        break;
1813
1814                if (__try_evict_buffer(b, 0))
1815                        count--;
1816
1817                cond_resched();
1818        }
1819
1820        dm_bufio_unlock(c);
1821}
1822
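/*
 * Global reclaim work item: walk the global LRU from its least recently used
 * end, give recently accessed buffers a second chance via the accessed bit,
 * and evict the rest (taking the owning client's lock first) until the total
 * allocation drops below the low watermark or the loop budget runs out.
 */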
1823static void do_global_cleanup(struct work_struct *w)
1824{
1825        struct dm_bufio_client *locked_client = NULL;
1826        struct dm_bufio_client *current_client;
1827        struct dm_buffer *b;
1828        unsigned spinlock_hold_count;
1829        unsigned long threshold = dm_bufio_cache_size -
1830                dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
1831        unsigned long loops = global_num * 2;
1832
1833        mutex_lock(&dm_bufio_clients_lock);
1834
1835        while (1) {
1836                cond_resched();
1837
1838                spin_lock(&global_spinlock);
1839                if (unlikely(dm_bufio_current_allocated <= threshold))
1840                        break;
1841
1842                spinlock_hold_count = 0;
1843get_next:
1844                if (!loops--)
1845                        break;
1846                if (unlikely(list_empty(&global_queue)))
1847                        break;
1848                b = list_entry(global_queue.prev, struct dm_buffer, global_list);
1849
1850                if (b->accessed) {
1851                        b->accessed = 0;
1852                        list_move(&b->global_list, &global_queue);
1853                        if (likely(++spinlock_hold_count < 16))
1854                                goto get_next;
1855                        spin_unlock(&global_spinlock);
1856                        continue;
1857                }
1858
1859                current_client = b->c;
1860                if (unlikely(current_client != locked_client)) {
1861                        if (locked_client)
1862                                dm_bufio_unlock(locked_client);
1863
1864                        if (!dm_bufio_trylock(current_client)) {
1865                                spin_unlock(&global_spinlock);
1866                                dm_bufio_lock(current_client);
1867                                locked_client = current_client;
1868                                continue;
1869                        }
1870
1871                        locked_client = current_client;
1872                }
1873
1874                spin_unlock(&global_spinlock);
1875
1876                if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
1877                        spin_lock(&global_spinlock);
1878                        list_move(&b->global_list, &global_queue);
1879                        spin_unlock(&global_spinlock);
1880                }
1881        }
1882
1883        spin_unlock(&global_spinlock);
1884
1885        if (locked_client)
1886                dm_bufio_unlock(locked_client);
1887
1888        mutex_unlock(&dm_bufio_clients_lock);
1889}
1890
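/*
 * Periodic aging: refresh the global cache size limit and evict buffers
 * older than max_age_seconds from every registered client.
 */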
1891static void cleanup_old_buffers(void)
1892{
1893        unsigned long max_age_hz = get_max_age_hz();
1894        struct dm_bufio_client *c;
1895
1896        mutex_lock(&dm_bufio_clients_lock);
1897
1898        __cache_size_refresh();
1899
1900        list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1901                __evict_old_buffers(c, max_age_hz);
1902
1903        mutex_unlock(&dm_bufio_clients_lock);
1904}
1905
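/*
 * Delayed work handler: run the aging pass and re-arm itself every
 * DM_BUFIO_WORK_TIMER_SECS seconds.
 */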
1906static void work_fn(struct work_struct *w)
1907{
1908        cleanup_old_buffers();
1909
1910        queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
1911                           DM_BUFIO_WORK_TIMER_SECS * HZ);
1912}
1913
1914/*----------------------------------------------------------------
1915 * Module setup
1916 *--------------------------------------------------------------*/
1917
1918/*
1919 * This is called only once for the whole dm_bufio module.
1920 * It initializes the memory limit.
1921 */
1922static int __init dm_bufio_init(void)
1923{
1924        __u64 mem;
1925
1926        dm_bufio_allocated_kmem_cache = 0;
1927        dm_bufio_allocated_get_free_pages = 0;
1928        dm_bufio_allocated_vmalloc = 0;
1929        dm_bufio_current_allocated = 0;
1930
1931        mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
1932                               DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
1933
1934        if (mem > ULONG_MAX)
1935                mem = ULONG_MAX;
1936
1937#ifdef CONFIG_MMU
1938        if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
1939                mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
1940#endif
1941
1942        dm_bufio_default_cache_size = mem;
1943
1944        mutex_lock(&dm_bufio_clients_lock);
1945        __cache_size_refresh();
1946        mutex_unlock(&dm_bufio_clients_lock);
1947
1948        dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
1949        if (!dm_bufio_wq)
1950                return -ENOMEM;
1951
1952        INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
1953        INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
1954        queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
1955                           DM_BUFIO_WORK_TIMER_SECS * HZ);
1956
1957        return 0;
1958}
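/*
 * Worked example (illustrative): on a 64-bit machine with 8GiB of RAM,
 * DM_BUFIO_MEMORY_PERCENT = 2 gives a default cache limit of roughly
 * 8GiB * 2% = ~164MiB; on 32-bit systems with CONFIG_MMU the limit may
 * instead be capped at DM_BUFIO_VMALLOC_PERCENT = 25% of the vmalloc area.
 */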
1959
1960/*
1961 * This is called once when unloading the dm_bufio module.
1962 */
1963static void __exit dm_bufio_exit(void)
1964{
1965        int bug = 0;
1966
1967        cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
1968        flush_workqueue(dm_bufio_wq);
1969        destroy_workqueue(dm_bufio_wq);
1970
1971        if (dm_bufio_client_count) {
1972                DMCRIT("%s: dm_bufio_client_count leaked: %d",
1973                        __func__, dm_bufio_client_count);
1974                bug = 1;
1975        }
1976
1977        if (dm_bufio_current_allocated) {
1978                DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1979                        __func__, dm_bufio_current_allocated);
1980                bug = 1;
1981        }
1982
1983        if (dm_bufio_allocated_get_free_pages) {
1984                DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1985                       __func__, dm_bufio_allocated_get_free_pages);
1986                bug = 1;
1987        }
1988
1989        if (dm_bufio_allocated_vmalloc) {
1990                DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
1991                       __func__, dm_bufio_allocated_vmalloc);
1992                bug = 1;
1993        }
1994
1995        BUG_ON(bug);
1996}
1997
1998module_init(dm_bufio_init)
1999module_exit(dm_bufio_exit)
2000
2001module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
2002MODULE_PARM_DESC(max_cache_size_bytes, "Maximum size of the metadata cache, in bytes");
2003
2004module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
2005MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2006
2007module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
2008MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2009
2010module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
2011MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2012
2013module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
2014MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2015
2016module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
2017MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2018
2019module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
2020MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2021
2022module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
2023MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
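/*
 * Illustrative sketch (not part of this file): the parameters above are
 * exposed under /sys/module/dm_bufio/parameters/, so for example a larger
 * cache can be requested at runtime with:
 *
 *	echo 268435456 > /sys/module/dm_bufio/parameters/max_cache_size_bytes
 */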
2024
2025MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2026MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2027MODULE_LICENSE("GPL");
2028