linux/drivers/md/dm-writecache.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2018 Red Hat. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include <linux/device-mapper.h>
   9#include <linux/module.h>
  10#include <linux/init.h>
  11#include <linux/vmalloc.h>
  12#include <linux/kthread.h>
  13#include <linux/dm-io.h>
  14#include <linux/dm-kcopyd.h>
  15#include <linux/dax.h>
  16#include <linux/pfn_t.h>
  17#include <linux/libnvdimm.h>
  18
  19#define DM_MSG_PREFIX "writecache"
  20
  21#define HIGH_WATERMARK                  50
  22#define LOW_WATERMARK                   45
  23#define MAX_WRITEBACK_JOBS              0
  24#define ENDIO_LATENCY                   16
  25#define WRITEBACK_LATENCY               64
  26#define AUTOCOMMIT_BLOCKS_SSD           65536
  27#define AUTOCOMMIT_BLOCKS_PMEM          64
  28#define AUTOCOMMIT_MSEC                 1000
  29#define MAX_AGE_DIV                     16
  30#define MAX_AGE_UNSPECIFIED             -1UL
  31
  32#define BITMAP_GRANULARITY      65536
  33#if BITMAP_GRANULARITY < PAGE_SIZE
  34#undef BITMAP_GRANULARITY
  35#define BITMAP_GRANULARITY      PAGE_SIZE
  36#endif
  37
  38#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
  39#define DM_WRITECACHE_HAS_PMEM
  40#endif
  41
  42#ifdef DM_WRITECACHE_HAS_PMEM
  43#define pmem_assign(dest, src)                                  \
  44do {                                                            \
  45        typeof(dest) uniq = (src);                              \
  46        memcpy_flushcache(&(dest), &uniq, sizeof(dest));        \
  47} while (0)
  48#else
  49#define pmem_assign(dest, src)  ((dest) = (src))
  50#endif
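/*
 * Example: the seq_count update in writecache_flush() below is written as
 *
 *	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
 *
 * which, in the PMEM build, expands to roughly
 *
 *	__le64 uniq = cpu_to_le64(wc->seq_count);
 *	memcpy_flushcache(&sb(wc)->seq_count, &uniq, sizeof(__le64));
 *
 * The temporary guarantees the value is fully computed before it is copied
 * with cache flushing, so a partially-evaluated source expression can never
 * be observed in persistent memory.
 */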
  51
  52#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
  53#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
  54#endif
  55
  56#define MEMORY_SUPERBLOCK_MAGIC         0x23489321
  57#define MEMORY_SUPERBLOCK_VERSION       1
  58
  59struct wc_memory_entry {
  60        __le64 original_sector;
  61        __le64 seq_count;
  62};
  63
  64struct wc_memory_superblock {
  65        union {
  66                struct {
  67                        __le32 magic;
  68                        __le32 version;
  69                        __le32 block_size;
  70                        __le32 pad;
  71                        __le64 n_blocks;
  72                        __le64 seq_count;
  73                };
  74                __le64 padding[8];
  75        };
   76        struct wc_memory_entry entries[];
  77};
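/*
 * On-media layout: the anonymous union pads the superblock header to
 * 64 bytes (8 * __le64), so the entry table starts at a fixed offset.
 * Each wc_memory_entry is 16 bytes; entry i therefore lives at byte
 * 64 + i * 16 of the metadata area, e.g.
 *
 *	offsetof(struct wc_memory_superblock, entries[3]) == 64 + 3 * 16
 */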
  78
  79struct wc_entry {
  80        struct rb_node rb_node;
  81        struct list_head lru;
  82        unsigned short wc_list_contiguous;
  83        bool write_in_progress
  84#if BITS_PER_LONG == 64
  85                :1
  86#endif
  87        ;
  88        unsigned long index
  89#if BITS_PER_LONG == 64
  90                :47
  91#endif
  92        ;
  93        unsigned long age;
  94#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
  95        uint64_t original_sector;
  96        uint64_t seq_count;
  97#endif
  98};
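/*
 * On 64-bit kernels the write_in_progress:1 and index:47 bitfields can be
 * packed into a single word, keeping the in-core entry small; 47 bits of
 * index is ample, addressing up to 2^47 cache blocks. On 32-bit kernels
 * the bitfields are dropped and plain full-width fields are used instead.
 */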
  99
 100#ifdef DM_WRITECACHE_HAS_PMEM
 101#define WC_MODE_PMEM(wc)                        ((wc)->pmem_mode)
 102#define WC_MODE_FUA(wc)                         ((wc)->writeback_fua)
 103#else
 104#define WC_MODE_PMEM(wc)                        false
 105#define WC_MODE_FUA(wc)                         false
 106#endif
 107#define WC_MODE_SORT_FREELIST(wc)               (!WC_MODE_PMEM(wc))
 108
 109struct dm_writecache {
 110        struct mutex lock;
 111        struct list_head lru;
 112        union {
 113                struct list_head freelist;
 114                struct {
 115                        struct rb_root freetree;
 116                        struct wc_entry *current_free;
 117                };
 118        };
 119        struct rb_root tree;
 120
 121        size_t freelist_size;
 122        size_t writeback_size;
 123        size_t freelist_high_watermark;
 124        size_t freelist_low_watermark;
 125        unsigned long max_age;
 126
 127        unsigned uncommitted_blocks;
 128        unsigned autocommit_blocks;
 129        unsigned max_writeback_jobs;
 130
 131        int error;
 132
 133        unsigned long autocommit_jiffies;
 134        struct timer_list autocommit_timer;
 135        struct wait_queue_head freelist_wait;
 136
 137        struct timer_list max_age_timer;
 138
 139        atomic_t bio_in_progress[2];
 140        struct wait_queue_head bio_in_progress_wait[2];
 141
 142        struct dm_target *ti;
 143        struct dm_dev *dev;
 144        struct dm_dev *ssd_dev;
 145        sector_t start_sector;
 146        void *memory_map;
 147        uint64_t memory_map_size;
 148        size_t metadata_sectors;
 149        size_t n_blocks;
 150        uint64_t seq_count;
 151        void *block_start;
 152        struct wc_entry *entries;
 153        unsigned block_size;
 154        unsigned char block_size_bits;
 155
 156        bool pmem_mode:1;
 157        bool writeback_fua:1;
 158
 159        bool overwrote_committed:1;
 160        bool memory_vmapped:1;
 161
 162        bool high_wm_percent_set:1;
 163        bool low_wm_percent_set:1;
 164        bool max_writeback_jobs_set:1;
 165        bool autocommit_blocks_set:1;
 166        bool autocommit_time_set:1;
 167        bool writeback_fua_set:1;
 168        bool flush_on_suspend:1;
 169        bool cleaner:1;
 170
 171        unsigned writeback_all;
 172        struct workqueue_struct *writeback_wq;
 173        struct work_struct writeback_work;
 174        struct work_struct flush_work;
 175
 176        struct dm_io_client *dm_io;
 177
 178        raw_spinlock_t endio_list_lock;
 179        struct list_head endio_list;
 180        struct task_struct *endio_thread;
 181
 182        struct task_struct *flush_thread;
 183        struct bio_list flush_list;
 184
 185        struct dm_kcopyd_client *dm_kcopyd;
 186        unsigned long *dirty_bitmap;
 187        unsigned dirty_bitmap_size;
 188
 189        struct bio_set bio_set;
 190        mempool_t copy_pool;
 191};
 192
 193#define WB_LIST_INLINE          16
 194
 195struct writeback_struct {
 196        struct list_head endio_entry;
 197        struct dm_writecache *wc;
 198        struct wc_entry **wc_list;
 199        unsigned wc_list_n;
 200        struct wc_entry *wc_list_inline[WB_LIST_INLINE];
 201        struct bio bio;
 202};
 203
 204struct copy_struct {
 205        struct list_head endio_entry;
 206        struct dm_writecache *wc;
 207        struct wc_entry *e;
 208        unsigned n_entries;
 209        int error;
 210};
 211
 212DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
 213                                            "A percentage of time allocated for data copying");
 214
 215static void wc_lock(struct dm_writecache *wc)
 216{
 217        mutex_lock(&wc->lock);
 218}
 219
 220static void wc_unlock(struct dm_writecache *wc)
 221{
 222        mutex_unlock(&wc->lock);
 223}
 224
 225#ifdef DM_WRITECACHE_HAS_PMEM
 226static int persistent_memory_claim(struct dm_writecache *wc)
 227{
 228        int r;
 229        loff_t s;
 230        long p, da;
 231        pfn_t pfn;
 232        int id;
 233        struct page **pages;
 234        sector_t offset;
 235
 236        wc->memory_vmapped = false;
 237
 238        s = wc->memory_map_size;
 239        p = s >> PAGE_SHIFT;
 240        if (!p) {
 241                r = -EINVAL;
 242                goto err1;
 243        }
 244        if (p != s >> PAGE_SHIFT) {
 245                r = -EOVERFLOW;
 246                goto err1;
 247        }
 248
 249        offset = get_start_sect(wc->ssd_dev->bdev);
 250        if (offset & (PAGE_SIZE / 512 - 1)) {
 251                r = -EINVAL;
 252                goto err1;
 253        }
 254        offset >>= PAGE_SHIFT - 9;
 255
 256        id = dax_read_lock();
 257
 258        da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
 259        if (da < 0) {
 260                wc->memory_map = NULL;
 261                r = da;
 262                goto err2;
 263        }
 264        if (!pfn_t_has_page(pfn)) {
 265                wc->memory_map = NULL;
 266                r = -EOPNOTSUPP;
 267                goto err2;
 268        }
 269        if (da != p) {
 270                long i;
 271                wc->memory_map = NULL;
 272                pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
 273                if (!pages) {
 274                        r = -ENOMEM;
 275                        goto err2;
 276                }
 277                i = 0;
 278                do {
 279                        long daa;
 280                        daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
 281                                                NULL, &pfn);
 282                        if (daa <= 0) {
 283                                r = daa ? daa : -EINVAL;
 284                                goto err3;
 285                        }
 286                        if (!pfn_t_has_page(pfn)) {
 287                                r = -EOPNOTSUPP;
 288                                goto err3;
 289                        }
 290                        while (daa-- && i < p) {
 291                                pages[i++] = pfn_t_to_page(pfn);
 292                                pfn.val++;
 293                                if (!(i & 15))
 294                                        cond_resched();
 295                        }
 296                } while (i < p);
 297                wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
 298                if (!wc->memory_map) {
 299                        r = -ENOMEM;
 300                        goto err3;
 301                }
 302                kvfree(pages);
 303                wc->memory_vmapped = true;
 304        }
 305
 306        dax_read_unlock(id);
 307
 308        wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
 309        wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
 310
 311        return 0;
 312err3:
 313        kvfree(pages);
 314err2:
 315        dax_read_unlock(id);
 316err1:
 317        return r;
 318}
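/*
 * Sketch of the claim above: dax_direct_access() may map fewer pages than
 * requested when the device's DAX ranges are discontiguous. In that case
 * every page is resolved individually and stitched into one virtually
 * contiguous mapping:
 *
 *	pages[i++] = pfn_t_to_page(pfn);	(one per returned pfn)
 *	wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
 *
 * memory_vmapped records which case was taken, so release and cache
 * maintenance use vunmap()/flush_kernel_vmap_range() only when needed.
 */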
 319#else
 320static int persistent_memory_claim(struct dm_writecache *wc)
 321{
 322        BUG();
 323}
 324#endif
 325
 326static void persistent_memory_release(struct dm_writecache *wc)
 327{
 328        if (wc->memory_vmapped)
 329                vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
 330}
 331
 332static struct page *persistent_memory_page(void *addr)
 333{
 334        if (is_vmalloc_addr(addr))
 335                return vmalloc_to_page(addr);
 336        else
 337                return virt_to_page(addr);
 338}
 339
 340static unsigned persistent_memory_page_offset(void *addr)
 341{
 342        return (unsigned long)addr & (PAGE_SIZE - 1);
 343}
 344
 345static void persistent_memory_flush_cache(void *ptr, size_t size)
 346{
 347        if (is_vmalloc_addr(ptr))
 348                flush_kernel_vmap_range(ptr, size);
 349}
 350
 351static void persistent_memory_invalidate_cache(void *ptr, size_t size)
 352{
 353        if (is_vmalloc_addr(ptr))
 354                invalidate_kernel_vmap_range(ptr, size);
 355}
 356
 357static struct wc_memory_superblock *sb(struct dm_writecache *wc)
 358{
 359        return wc->memory_map;
 360}
 361
 362static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
 363{
 364        return &sb(wc)->entries[e->index];
 365}
 366
 367static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
 368{
 369        return (char *)wc->block_start + (e->index << wc->block_size_bits);
 370}
 371
 372static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
 373{
 374        return wc->start_sector + wc->metadata_sectors +
 375                ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
 376}
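/*
 * Worked example of the address arithmetic, assuming the common 4096-byte
 * block size (block_size_bits == 12): the data of entry i starts at
 * block_start + (i << 12) in memory, and on the cache device at sector
 * start_sector + metadata_sectors + i * 8, since one 4K block spans
 * 8 sectors (12 - SECTOR_SHIFT == 3).
 */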
 377
 378static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
 379{
 380#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
 381        return e->original_sector;
 382#else
 383        return le64_to_cpu(memory_entry(wc, e)->original_sector);
 384#endif
 385}
 386
 387static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
 388{
 389#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
 390        return e->seq_count;
 391#else
 392        return le64_to_cpu(memory_entry(wc, e)->seq_count);
 393#endif
 394}
 395
 396static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
 397{
 398#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
 399        e->seq_count = -1;
 400#endif
 401        pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
 402}
 403
 404static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
 405                                            uint64_t original_sector, uint64_t seq_count)
 406{
 407        struct wc_memory_entry me;
 408#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
 409        e->original_sector = original_sector;
 410        e->seq_count = seq_count;
 411#endif
 412        me.original_sector = cpu_to_le64(original_sector);
 413        me.seq_count = cpu_to_le64(seq_count);
 414        pmem_assign(*memory_entry(wc, e), me);
 415}
 416
 417#define writecache_error(wc, err, msg, arg...)                          \
 418do {                                                                    \
 419        if (!cmpxchg(&(wc)->error, 0, err))                             \
 420                DMERR(msg, ##arg);                                      \
 421        wake_up(&(wc)->freelist_wait);                                  \
 422} while (0)
 423
 424#define writecache_has_error(wc)        (unlikely(READ_ONCE((wc)->error)))
 425
 426static void writecache_flush_all_metadata(struct dm_writecache *wc)
 427{
 428        if (!WC_MODE_PMEM(wc))
 429                memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
 430}
 431
 432static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
 433{
 434        if (!WC_MODE_PMEM(wc))
 435                __set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
 436                          wc->dirty_bitmap);
 437}
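/*
 * In SSD mode, metadata persistence is tracked with one dirty bit per
 * BITMAP_GRANULARITY (64K, or PAGE_SIZE if larger) chunk of the metadata
 * image. For example, with 64K granularity, flushing the 16-byte entry at
 * byte offset 200000 sets bit 200000 / 65536 == 3, and ssd_commit_flushed()
 * later writes chunk 3 back to the cache device in one I/O.
 */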
 438
 439static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);
 440
 441struct io_notify {
 442        struct dm_writecache *wc;
 443        struct completion c;
 444        atomic_t count;
 445};
 446
 447static void writecache_notify_io(unsigned long error, void *context)
 448{
 449        struct io_notify *endio = context;
 450
 451        if (unlikely(error != 0))
 452                writecache_error(endio->wc, -EIO, "error writing metadata");
 453        BUG_ON(atomic_read(&endio->count) <= 0);
 454        if (atomic_dec_and_test(&endio->count))
 455                complete(&endio->c);
 456}
 457
 458static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
 459{
 460        wait_event(wc->bio_in_progress_wait[direction],
 461                   !atomic_read(&wc->bio_in_progress[direction]));
 462}
 463
 464static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
 465{
 466        struct dm_io_region region;
 467        struct dm_io_request req;
 468        struct io_notify endio = {
 469                wc,
 470                COMPLETION_INITIALIZER_ONSTACK(endio.c),
 471                ATOMIC_INIT(1),
 472        };
 473        unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
 474        unsigned i = 0;
 475
 476        while (1) {
 477                unsigned j;
 478                i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
 479                if (unlikely(i == bitmap_bits))
 480                        break;
 481                j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);
 482
 483                region.bdev = wc->ssd_dev->bdev;
 484                region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
 485                region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
 486
 487                if (unlikely(region.sector >= wc->metadata_sectors))
 488                        break;
 489                if (unlikely(region.sector + region.count > wc->metadata_sectors))
 490                        region.count = wc->metadata_sectors - region.sector;
 491
 492                region.sector += wc->start_sector;
 493                atomic_inc(&endio.count);
 494                req.bi_op = REQ_OP_WRITE;
 495                req.bi_op_flags = REQ_SYNC;
 496                req.mem.type = DM_IO_VMA;
 497                req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
 498                req.client = wc->dm_io;
 499                req.notify.fn = writecache_notify_io;
 500                req.notify.context = &endio;
 501
 502                /* writing via async dm-io (implied by notify.fn above) won't return an error */
 503                (void) dm_io(&req, 1, &region, NULL);
 504                i = j;
 505        }
 506
 507        writecache_notify_io(0, &endio);
 508        wait_for_completion_io(&endio.c);
 509
 510        if (wait_for_ios)
 511                writecache_wait_for_ios(wc, WRITE);
 512
 513        writecache_disk_flush(wc, wc->ssd_dev);
 514
 515        memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
 516}
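/*
 * Commit protocol for SSD mode, as implemented above: every run of dirty
 * bitmap bits becomes one REQ_SYNC write of the corresponding metadata
 * chunks; the initial reference in endio.count makes the final
 * writecache_notify_io(0, ...) the completion that fires only after all
 * submitted writes have finished. Only then is a disk cache flush issued
 * and the bitmap cleared, so a crash can never leave the device claiming
 * newer metadata than it actually persisted.
 */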
 517
 518static void ssd_commit_superblock(struct dm_writecache *wc)
 519{
 520        int r;
 521        struct dm_io_region region;
 522        struct dm_io_request req;
 523
 524        region.bdev = wc->ssd_dev->bdev;
 525        region.sector = 0;
  526        region.count = PAGE_SIZE >> SECTOR_SHIFT;
 527
 528        if (unlikely(region.sector + region.count > wc->metadata_sectors))
 529                region.count = wc->metadata_sectors - region.sector;
 530
 531        region.sector += wc->start_sector;
 532
 533        req.bi_op = REQ_OP_WRITE;
 534        req.bi_op_flags = REQ_SYNC | REQ_FUA;
 535        req.mem.type = DM_IO_VMA;
 536        req.mem.ptr.vma = (char *)wc->memory_map;
 537        req.client = wc->dm_io;
 538        req.notify.fn = NULL;
 539        req.notify.context = NULL;
 540
 541        r = dm_io(&req, 1, &region, NULL);
 542        if (unlikely(r))
 543                writecache_error(wc, r, "error writing superblock");
 544}
 545
 546static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
 547{
 548        if (WC_MODE_PMEM(wc))
 549                pmem_wmb();
 550        else
 551                ssd_commit_flushed(wc, wait_for_ios);
 552}
 553
 554static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
 555{
 556        int r;
 557        struct dm_io_region region;
 558        struct dm_io_request req;
 559
 560        region.bdev = dev->bdev;
 561        region.sector = 0;
 562        region.count = 0;
 563        req.bi_op = REQ_OP_WRITE;
 564        req.bi_op_flags = REQ_PREFLUSH;
 565        req.mem.type = DM_IO_KMEM;
 566        req.mem.ptr.addr = NULL;
 567        req.client = wc->dm_io;
 568        req.notify.fn = NULL;
 569
 570        r = dm_io(&req, 1, &region, NULL);
 571        if (unlikely(r))
 572                writecache_error(wc, r, "error flushing metadata: %d", r);
 573}
 574
 575#define WFE_RETURN_FOLLOWING    1
 576#define WFE_LOWEST_SEQ          2
 577
 578static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
 579                                              uint64_t block, int flags)
 580{
 581        struct wc_entry *e;
 582        struct rb_node *node = wc->tree.rb_node;
 583
 584        if (unlikely(!node))
 585                return NULL;
 586
 587        while (1) {
 588                e = container_of(node, struct wc_entry, rb_node);
 589                if (read_original_sector(wc, e) == block)
 590                        break;
 591
 592                node = (read_original_sector(wc, e) >= block ?
 593                        e->rb_node.rb_left : e->rb_node.rb_right);
 594                if (unlikely(!node)) {
 595                        if (!(flags & WFE_RETURN_FOLLOWING))
 596                                return NULL;
 597                        if (read_original_sector(wc, e) >= block) {
 598                                return e;
 599                        } else {
 600                                node = rb_next(&e->rb_node);
 601                                if (unlikely(!node))
 602                                        return NULL;
 603                                e = container_of(node, struct wc_entry, rb_node);
 604                                return e;
 605                        }
 606                }
 607        }
 608
 609        while (1) {
 610                struct wc_entry *e2;
 611                if (flags & WFE_LOWEST_SEQ)
 612                        node = rb_prev(&e->rb_node);
 613                else
 614                        node = rb_next(&e->rb_node);
 615                if (unlikely(!node))
 616                        return e;
 617                e2 = container_of(node, struct wc_entry, rb_node);
 618                if (read_original_sector(wc, e2) != block)
 619                        return e;
 620                e = e2;
 621        }
 622}
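/*
 * Note on the two loops above: the tree is keyed by original_sector but may
 * briefly hold two entries for the same sector (an old committed block and
 * its uncommitted overwrite), so a plain rbtree lookup is not enough. The
 * first loop finds any entry for the block (or, with WFE_RETURN_FOLLOWING,
 * the next higher block); the second walks sideways to return the entry
 * with the lowest sequence count (WFE_LOWEST_SEQ, wanted by writeback) or
 * the highest (wanted by the write path).
 */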
 623
 624static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
 625{
 626        struct wc_entry *e;
 627        struct rb_node **node = &wc->tree.rb_node, *parent = NULL;
 628
 629        while (*node) {
 630                e = container_of(*node, struct wc_entry, rb_node);
 631                parent = &e->rb_node;
 632                if (read_original_sector(wc, e) > read_original_sector(wc, ins))
 633                        node = &parent->rb_left;
 634                else
 635                        node = &parent->rb_right;
 636        }
 637        rb_link_node(&ins->rb_node, parent, node);
 638        rb_insert_color(&ins->rb_node, &wc->tree);
 639        list_add(&ins->lru, &wc->lru);
 640        ins->age = jiffies;
 641}
 642
 643static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
 644{
 645        list_del(&e->lru);
 646        rb_erase(&e->rb_node, &wc->tree);
 647}
 648
 649static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
 650{
 651        if (WC_MODE_SORT_FREELIST(wc)) {
 652                struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
 653                if (unlikely(!*node))
 654                        wc->current_free = e;
 655                while (*node) {
 656                        parent = *node;
 657                        if (&e->rb_node < *node)
 658                                node = &parent->rb_left;
 659                        else
 660                                node = &parent->rb_right;
 661                }
 662                rb_link_node(&e->rb_node, parent, node);
 663                rb_insert_color(&e->rb_node, &wc->freetree);
 664        } else {
 665                list_add_tail(&e->lru, &wc->freelist);
 666        }
 667        wc->freelist_size++;
 668}
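/*
 * The sorted freetree above orders entries by their address in wc->entries[],
 * which is equivalent to ordering by index and hence by cache sector, since
 * cache_sector() is monotonic in e->index. Allocations therefore tend to
 * hand out ascending cache sectors, which is what makes the
 * consecutive-sector optimization in writecache_map() effective.
 */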
 669
 670static inline void writecache_verify_watermark(struct dm_writecache *wc)
 671{
 672        if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
 673                queue_work(wc->writeback_wq, &wc->writeback_work);
 674}
 675
 676static void writecache_max_age_timer(struct timer_list *t)
 677{
 678        struct dm_writecache *wc = from_timer(wc, t, max_age_timer);
 679
 680        if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
 681                queue_work(wc->writeback_wq, &wc->writeback_work);
 682                mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
 683        }
 684}
 685
 686static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
 687{
 688        struct wc_entry *e;
 689
 690        if (WC_MODE_SORT_FREELIST(wc)) {
 691                struct rb_node *next;
 692                if (unlikely(!wc->current_free))
 693                        return NULL;
 694                e = wc->current_free;
 695                if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
 696                        return NULL;
 697                next = rb_next(&e->rb_node);
 698                rb_erase(&e->rb_node, &wc->freetree);
 699                if (unlikely(!next))
 700                        next = rb_first(&wc->freetree);
 701                wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
 702        } else {
 703                if (unlikely(list_empty(&wc->freelist)))
 704                        return NULL;
 705                e = container_of(wc->freelist.next, struct wc_entry, lru);
 706                if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
 707                        return NULL;
 708                list_del(&e->lru);
 709        }
 710        wc->freelist_size--;
 711
 712        writecache_verify_watermark(wc);
 713
 714        return e;
 715}
 716
 717static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
 718{
 719        writecache_unlink(wc, e);
 720        writecache_add_to_freelist(wc, e);
 721        clear_seq_count(wc, e);
 722        writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
 723        if (unlikely(waitqueue_active(&wc->freelist_wait)))
 724                wake_up(&wc->freelist_wait);
 725}
 726
 727static void writecache_wait_on_freelist(struct dm_writecache *wc)
 728{
 729        DEFINE_WAIT(wait);
 730
 731        prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
 732        wc_unlock(wc);
 733        io_schedule();
 734        finish_wait(&wc->freelist_wait, &wait);
 735        wc_lock(wc);
 736}
 737
 738static void writecache_poison_lists(struct dm_writecache *wc)
 739{
 740        /*
 741         * Catch incorrect access to these values while the device is suspended.
 742         */
 743        memset(&wc->tree, -1, sizeof wc->tree);
 744        wc->lru.next = LIST_POISON1;
 745        wc->lru.prev = LIST_POISON2;
 746        wc->freelist.next = LIST_POISON1;
 747        wc->freelist.prev = LIST_POISON2;
 748}
 749
 750static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
 751{
 752        writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
 753        if (WC_MODE_PMEM(wc))
 754                writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
 755}
 756
 757static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
 758{
 759        return read_seq_count(wc, e) < wc->seq_count;
 760}
 761
 762static void writecache_flush(struct dm_writecache *wc)
 763{
 764        struct wc_entry *e, *e2;
 765        bool need_flush_after_free;
 766
 767        wc->uncommitted_blocks = 0;
 768        del_timer(&wc->autocommit_timer);
 769
 770        if (list_empty(&wc->lru))
 771                return;
 772
 773        e = container_of(wc->lru.next, struct wc_entry, lru);
 774        if (writecache_entry_is_committed(wc, e)) {
 775                if (wc->overwrote_committed) {
 776                        writecache_wait_for_ios(wc, WRITE);
 777                        writecache_disk_flush(wc, wc->ssd_dev);
 778                        wc->overwrote_committed = false;
 779                }
 780                return;
 781        }
 782        while (1) {
 783                writecache_flush_entry(wc, e);
 784                if (unlikely(e->lru.next == &wc->lru))
 785                        break;
 786                e2 = container_of(e->lru.next, struct wc_entry, lru);
 787                if (writecache_entry_is_committed(wc, e2))
 788                        break;
 789                e = e2;
 790                cond_resched();
 791        }
 792        writecache_commit_flushed(wc, true);
 793
 794        wc->seq_count++;
 795        pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
 796        if (WC_MODE_PMEM(wc))
 797                writecache_commit_flushed(wc, false);
 798        else
 799                ssd_commit_superblock(wc);
 800
 801        wc->overwrote_committed = false;
 802
 803        need_flush_after_free = false;
 804        while (1) {
 805                /* Free another committed entry with lower seq-count */
 806                struct rb_node *rb_node = rb_prev(&e->rb_node);
 807
 808                if (rb_node) {
 809                        e2 = container_of(rb_node, struct wc_entry, rb_node);
 810                        if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
 811                            likely(!e2->write_in_progress)) {
 812                                writecache_free_entry(wc, e2);
 813                                need_flush_after_free = true;
 814                        }
 815                }
 816                if (unlikely(e->lru.prev == &wc->lru))
 817                        break;
 818                e = container_of(e->lru.prev, struct wc_entry, lru);
 819                cond_resched();
 820        }
 821
 822        if (need_flush_after_free)
 823                writecache_commit_flushed(wc, false);
 824}
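/*
 * Summary of the commit sequence above:
 *  1. flush every uncommitted entry from the head of the LRU,
 *  2. writecache_commit_flushed() - a barrier, after which the entries are
 *     durable but still carry the current seq_count,
 *  3. bump and persist sb->seq_count, which atomically commits them
 *     (writecache_entry_is_committed() compares against wc->seq_count),
 *  4. free any older committed copies of the same blocks, now superseded.
 */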
 825
 826static void writecache_flush_work(struct work_struct *work)
 827{
 828        struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);
 829
 830        wc_lock(wc);
 831        writecache_flush(wc);
 832        wc_unlock(wc);
 833}
 834
 835static void writecache_autocommit_timer(struct timer_list *t)
 836{
 837        struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
 838        if (!writecache_has_error(wc))
 839                queue_work(wc->writeback_wq, &wc->flush_work);
 840}
 841
 842static void writecache_schedule_autocommit(struct dm_writecache *wc)
 843{
 844        if (!timer_pending(&wc->autocommit_timer))
 845                mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
 846}
 847
 848static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
 849{
 850        struct wc_entry *e;
 851        bool discarded_something = false;
 852
 853        e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
 854        if (unlikely(!e))
 855                return;
 856
 857        while (read_original_sector(wc, e) < end) {
 858                struct rb_node *node = rb_next(&e->rb_node);
 859
 860                if (likely(!e->write_in_progress)) {
 861                        if (!discarded_something) {
 862                                if (!WC_MODE_PMEM(wc)) {
 863                                        writecache_wait_for_ios(wc, READ);
 864                                        writecache_wait_for_ios(wc, WRITE);
 865                                }
 866                                discarded_something = true;
 867                        }
 868                        if (!writecache_entry_is_committed(wc, e))
 869                                wc->uncommitted_blocks--;
 870                        writecache_free_entry(wc, e);
 871                }
 872
 873                if (unlikely(!node))
 874                        break;
 875
 876                e = container_of(node, struct wc_entry, rb_node);
 877        }
 878
 879        if (discarded_something)
 880                writecache_commit_flushed(wc, false);
 881}
 882
 883static bool writecache_wait_for_writeback(struct dm_writecache *wc)
 884{
 885        if (wc->writeback_size) {
 886                writecache_wait_on_freelist(wc);
 887                return true;
 888        }
 889        return false;
 890}
 891
 892static void writecache_suspend(struct dm_target *ti)
 893{
 894        struct dm_writecache *wc = ti->private;
 895        bool flush_on_suspend;
 896
 897        del_timer_sync(&wc->autocommit_timer);
 898        del_timer_sync(&wc->max_age_timer);
 899
 900        wc_lock(wc);
 901        writecache_flush(wc);
 902        flush_on_suspend = wc->flush_on_suspend;
 903        if (flush_on_suspend) {
 904                wc->flush_on_suspend = false;
 905                wc->writeback_all++;
 906                queue_work(wc->writeback_wq, &wc->writeback_work);
 907        }
 908        wc_unlock(wc);
 909
 910        drain_workqueue(wc->writeback_wq);
 911
 912        wc_lock(wc);
 913        if (flush_on_suspend)
 914                wc->writeback_all--;
 915        while (writecache_wait_for_writeback(wc));
 916
 917        if (WC_MODE_PMEM(wc))
 918                persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
 919
 920        writecache_poison_lists(wc);
 921
 922        wc_unlock(wc);
 923}
 924
 925static int writecache_alloc_entries(struct dm_writecache *wc)
 926{
 927        size_t b;
 928
 929        if (wc->entries)
 930                return 0;
 931        wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
 932        if (!wc->entries)
 933                return -ENOMEM;
 934        for (b = 0; b < wc->n_blocks; b++) {
 935                struct wc_entry *e = &wc->entries[b];
 936                e->index = b;
 937                e->write_in_progress = false;
 938                cond_resched();
 939        }
 940
 941        return 0;
 942}
 943
 944static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
 945{
 946        struct dm_io_region region;
 947        struct dm_io_request req;
 948
 949        region.bdev = wc->ssd_dev->bdev;
 950        region.sector = wc->start_sector;
 951        region.count = n_sectors;
 952        req.bi_op = REQ_OP_READ;
 953        req.bi_op_flags = REQ_SYNC;
 954        req.mem.type = DM_IO_VMA;
 955        req.mem.ptr.vma = (char *)wc->memory_map;
 956        req.client = wc->dm_io;
 957        req.notify.fn = NULL;
 958
 959        return dm_io(&req, 1, &region, NULL);
 960}
 961
 962static void writecache_resume(struct dm_target *ti)
 963{
 964        struct dm_writecache *wc = ti->private;
 965        size_t b;
 966        bool need_flush = false;
 967        __le64 sb_seq_count;
 968        int r;
 969
 970        wc_lock(wc);
 971
 972        if (WC_MODE_PMEM(wc)) {
 973                persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
 974        } else {
 975                r = writecache_read_metadata(wc, wc->metadata_sectors);
 976                if (r) {
 977                        size_t sb_entries_offset;
 978                        writecache_error(wc, r, "unable to read metadata: %d", r);
 979                        sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
 980                        memset((char *)wc->memory_map + sb_entries_offset, -1,
 981                               (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
 982                }
 983        }
 984
 985        wc->tree = RB_ROOT;
 986        INIT_LIST_HEAD(&wc->lru);
 987        if (WC_MODE_SORT_FREELIST(wc)) {
 988                wc->freetree = RB_ROOT;
 989                wc->current_free = NULL;
 990        } else {
 991                INIT_LIST_HEAD(&wc->freelist);
 992        }
 993        wc->freelist_size = 0;
 994
 995        r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
 996        if (r) {
 997                writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
 998                sb_seq_count = cpu_to_le64(0);
 999        }
1000        wc->seq_count = le64_to_cpu(sb_seq_count);
1001
1002#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
1003        for (b = 0; b < wc->n_blocks; b++) {
1004                struct wc_entry *e = &wc->entries[b];
1005                struct wc_memory_entry wme;
1006                if (writecache_has_error(wc)) {
1007                        e->original_sector = -1;
1008                        e->seq_count = -1;
1009                        continue;
1010                }
1011                r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
1012                if (r) {
1013                        writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
1014                                         (unsigned long)b, r);
1015                        e->original_sector = -1;
1016                        e->seq_count = -1;
1017                } else {
1018                        e->original_sector = le64_to_cpu(wme.original_sector);
1019                        e->seq_count = le64_to_cpu(wme.seq_count);
1020                }
1021                cond_resched();
1022        }
1023#endif
1024        for (b = 0; b < wc->n_blocks; b++) {
1025                struct wc_entry *e = &wc->entries[b];
1026                if (!writecache_entry_is_committed(wc, e)) {
1027                        if (read_seq_count(wc, e) != -1) {
1028erase_this:
1029                                clear_seq_count(wc, e);
1030                                need_flush = true;
1031                        }
1032                        writecache_add_to_freelist(wc, e);
1033                } else {
1034                        struct wc_entry *old;
1035
1036                        old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
1037                        if (!old) {
1038                                writecache_insert_entry(wc, e);
1039                        } else {
1040                                if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
1041                                        writecache_error(wc, -EINVAL,
1042                                                 "two identical entries, position %llu, sector %llu, sequence %llu",
1043                                                 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
1044                                                 (unsigned long long)read_seq_count(wc, e));
1045                                }
1046                                if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
1047                                        goto erase_this;
1048                                } else {
1049                                        writecache_free_entry(wc, old);
1050                                        writecache_insert_entry(wc, e);
1051                                        need_flush = true;
1052                                }
1053                        }
1054                }
1055                cond_resched();
1056        }
1057
1058        if (need_flush) {
1059                writecache_flush_all_metadata(wc);
1060                writecache_commit_flushed(wc, false);
1061        }
1062
1063        writecache_verify_watermark(wc);
1064
1065        if (wc->max_age != MAX_AGE_UNSPECIFIED)
1066                mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
1067
1068        wc_unlock(wc);
1069}
1070
1071static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1072{
1073        if (argc != 1)
1074                return -EINVAL;
1075
1076        wc_lock(wc);
1077        if (dm_suspended(wc->ti)) {
1078                wc_unlock(wc);
1079                return -EBUSY;
1080        }
1081        if (writecache_has_error(wc)) {
1082                wc_unlock(wc);
1083                return -EIO;
1084        }
1085
1086        writecache_flush(wc);
1087        wc->writeback_all++;
1088        queue_work(wc->writeback_wq, &wc->writeback_work);
1089        wc_unlock(wc);
1090
1091        flush_workqueue(wc->writeback_wq);
1092
1093        wc_lock(wc);
1094        wc->writeback_all--;
1095        if (writecache_has_error(wc)) {
1096                wc_unlock(wc);
1097                return -EIO;
1098        }
1099        wc_unlock(wc);
1100
1101        return 0;
1102}
1103
1104static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1105{
1106        if (argc != 1)
1107                return -EINVAL;
1108
1109        wc_lock(wc);
1110        wc->flush_on_suspend = true;
1111        wc_unlock(wc);
1112
1113        return 0;
1114}
1115
1116static void activate_cleaner(struct dm_writecache *wc)
1117{
1118        wc->flush_on_suspend = true;
1119        wc->cleaner = true;
1120        wc->freelist_high_watermark = wc->n_blocks;
1121        wc->freelist_low_watermark = wc->n_blocks;
1122}
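/*
 * With both watermarks at n_blocks, writecache_verify_watermark() always
 * sees freelist_size + writeback_size <= freelist_high_watermark, so
 * writeback keeps being queued until the cache has been completely drained
 * to the origin device.
 */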
1123
1124static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1125{
1126        if (argc != 1)
1127                return -EINVAL;
1128
1129        wc_lock(wc);
1130        activate_cleaner(wc);
1131        if (!dm_suspended(wc->ti))
1132                writecache_verify_watermark(wc);
1133        wc_unlock(wc);
1134
1135        return 0;
1136}
1137
1138static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
1139                              char *result, unsigned maxlen)
1140{
1141        int r = -EINVAL;
1142        struct dm_writecache *wc = ti->private;
1143
1144        if (!strcasecmp(argv[0], "flush"))
1145                r = process_flush_mesg(argc, argv, wc);
1146        else if (!strcasecmp(argv[0], "flush_on_suspend"))
1147                r = process_flush_on_suspend_mesg(argc, argv, wc);
1148        else if (!strcasecmp(argv[0], "cleaner"))
1149                r = process_cleaner_mesg(argc, argv, wc);
1150        else
1151                DMERR("unrecognised message received: %s", argv[0]);
1152
1153        return r;
1154}
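/*
 * These messages arrive through the standard device-mapper message
 * interface; for example, to flush and then drain a writecache device
 * named "wc" (the name is illustrative):
 *
 *	# dmsetup message wc 0 flush
 *	# dmsetup message wc 0 cleaner
 *
 * "0" is the sector offset conventionally used for whole-target messages.
 */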
1155
1156static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
1157{
1158        /*
1159         * clflushopt performs better with block size 1024, 2048, 4096
1160         * non-temporal stores perform better with block size 512
1161         *
1162         * block size   512             1024            2048            4096
1163         * movnti       496 MB/s        642 MB/s        725 MB/s        744 MB/s
1164         * clflushopt   373 MB/s        688 MB/s        1.1 GB/s        1.2 GB/s
1165         *
1166         * We see that movnti performs better for 512-byte blocks, and
1167         * clflushopt performs better for 1024-byte and larger blocks. So, we
1168         * prefer clflushopt for sizes >= 768.
1169         *
1170         * NOTE: this happens to be the case now (with dm-writecache's single
1171         * threaded model) but re-evaluate this once memcpy_flushcache() is
1172         * enabled to use movdir64b which might invalidate this performance
1173         * advantage seen with cache-allocating-writes plus flushing.
1174         */
1175#ifdef CONFIG_X86
1176        if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
1177            likely(boot_cpu_data.x86_clflush_size == 64) &&
1178            likely(size >= 768)) {
1179                do {
1180                        memcpy((void *)dest, (void *)source, 64);
1181                        clflushopt((void *)dest);
1182                        dest += 64;
1183                        source += 64;
1184                        size -= 64;
1185                } while (size >= 64);
1186                return;
1187        }
1188#endif
1189        memcpy_flushcache(dest, source, size);
1190}
1191
1192static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
1193{
1194        void *buf;
1195        unsigned long flags;
1196        unsigned size;
1197        int rw = bio_data_dir(bio);
1198        unsigned remaining_size = wc->block_size;
1199
1200        do {
1201                struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
1202                buf = bvec_kmap_irq(&bv, &flags);
1203                size = bv.bv_len;
1204                if (unlikely(size > remaining_size))
1205                        size = remaining_size;
1206
1207                if (rw == READ) {
1208                        int r;
1209                        r = memcpy_mcsafe(buf, data, size);
1210                        flush_dcache_page(bio_page(bio));
1211                        if (unlikely(r)) {
1212                                writecache_error(wc, r, "hardware memory error when reading data: %d", r);
1213                                bio->bi_status = BLK_STS_IOERR;
1214                        }
1215                } else {
1216                        flush_dcache_page(bio_page(bio));
1217                        memcpy_flushcache_optimized(data, buf, size);
1218                }
1219
1220                bvec_kunmap_irq(buf, &flags);
1221
1222                data = (char *)data + size;
1223                remaining_size -= size;
1224                bio_advance(bio, size);
1225        } while (unlikely(remaining_size));
1226}
1227
1228static int writecache_flush_thread(void *data)
1229{
1230        struct dm_writecache *wc = data;
1231
1232        while (1) {
1233                struct bio *bio;
1234
1235                wc_lock(wc);
1236                bio = bio_list_pop(&wc->flush_list);
1237                if (!bio) {
1238                        set_current_state(TASK_INTERRUPTIBLE);
1239                        wc_unlock(wc);
1240
1241                        if (unlikely(kthread_should_stop())) {
1242                                set_current_state(TASK_RUNNING);
1243                                break;
1244                        }
1245
1246                        schedule();
1247                        continue;
1248                }
1249
1250                if (bio_op(bio) == REQ_OP_DISCARD) {
1251                        writecache_discard(wc, bio->bi_iter.bi_sector,
1252                                           bio_end_sector(bio));
1253                        wc_unlock(wc);
1254                        bio_set_dev(bio, wc->dev->bdev);
1255                        submit_bio_noacct(bio);
1256                } else {
1257                        writecache_flush(wc);
1258                        wc_unlock(wc);
1259                        if (writecache_has_error(wc))
1260                                bio->bi_status = BLK_STS_IOERR;
1261                        bio_endio(bio);
1262                }
1263        }
1264
1265        return 0;
1266}
1267
1268static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
1269{
1270        if (bio_list_empty(&wc->flush_list))
1271                wake_up_process(wc->flush_thread);
1272        bio_list_add(&wc->flush_list, bio);
1273}
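/*
 * writecache_offload_bio() runs under wc->lock, as does the list pop in
 * writecache_flush_thread(), so waking the thread before adding the bio is
 * safe: the thread cannot observe the list until the lock is released, and
 * the wake is issued only when the list goes non-empty.
 */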
1274
1275static int writecache_map(struct dm_target *ti, struct bio *bio)
1276{
1277        struct wc_entry *e;
1278        struct dm_writecache *wc = ti->private;
1279
1280        bio->bi_private = NULL;
1281
1282        wc_lock(wc);
1283
1284        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1285                if (writecache_has_error(wc))
1286                        goto unlock_error;
1287                if (WC_MODE_PMEM(wc)) {
1288                        writecache_flush(wc);
1289                        if (writecache_has_error(wc))
1290                                goto unlock_error;
1291                        goto unlock_submit;
1292                } else {
1293                        writecache_offload_bio(wc, bio);
1294                        goto unlock_return;
1295                }
1296        }
1297
1298        bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1299
1300        if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
1301                                (wc->block_size / 512 - 1)) != 0)) {
1302                DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
1303                      (unsigned long long)bio->bi_iter.bi_sector,
1304                      bio->bi_iter.bi_size, wc->block_size);
1305                goto unlock_error;
1306        }
1307
1308        if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
1309                if (writecache_has_error(wc))
1310                        goto unlock_error;
1311                if (WC_MODE_PMEM(wc)) {
1312                        writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
1313                        goto unlock_remap_origin;
1314                } else {
1315                        writecache_offload_bio(wc, bio);
1316                        goto unlock_return;
1317                }
1318        }
1319
1320        if (bio_data_dir(bio) == READ) {
1321read_next_block:
1322                e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1323                if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
1324                        if (WC_MODE_PMEM(wc)) {
1325                                bio_copy_block(wc, bio, memory_data(wc, e));
1326                                if (bio->bi_iter.bi_size)
1327                                        goto read_next_block;
1328                                goto unlock_submit;
1329                        } else {
1330                                dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
1331                                bio_set_dev(bio, wc->ssd_dev->bdev);
1332                                bio->bi_iter.bi_sector = cache_sector(wc, e);
1333                                if (!writecache_entry_is_committed(wc, e))
1334                                        writecache_wait_for_ios(wc, WRITE);
1335                                goto unlock_remap;
1336                        }
1337                } else {
1338                        if (e) {
1339                                sector_t next_boundary =
1340                                        read_original_sector(wc, e) - bio->bi_iter.bi_sector;
1341                                if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
1342                                        dm_accept_partial_bio(bio, next_boundary);
1343                                }
1344                        }
1345                        goto unlock_remap_origin;
1346                }
1347        } else {
1348                do {
1349                        bool found_entry = false;
1350                        if (writecache_has_error(wc))
1351                                goto unlock_error;
1352                        e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
1353                        if (e) {
1354                                if (!writecache_entry_is_committed(wc, e))
1355                                        goto bio_copy;
1356                                if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
1357                                        wc->overwrote_committed = true;
1358                                        goto bio_copy;
1359                                }
1360                                found_entry = true;
1361                        } else {
1362                                if (unlikely(wc->cleaner))
1363                                        goto direct_write;
1364                        }
1365                        e = writecache_pop_from_freelist(wc, (sector_t)-1);
1366                        if (unlikely(!e)) {
1367                                if (!found_entry) {
1368direct_write:
1369                                        e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1370                                        if (e) {
1371                                                sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
1372                                                BUG_ON(!next_boundary);
1373                                                if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
1374                                                        dm_accept_partial_bio(bio, next_boundary);
1375                                                }
1376                                        }
1377                                        goto unlock_remap_origin;
1378                                }
1379                                writecache_wait_on_freelist(wc);
1380                                continue;
1381                        }
1382                        write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
1383                        writecache_insert_entry(wc, e);
1384                        wc->uncommitted_blocks++;
1385bio_copy:
1386                        if (WC_MODE_PMEM(wc)) {
1387                                bio_copy_block(wc, bio, memory_data(wc, e));
1388                        } else {
1389                                unsigned bio_size = wc->block_size;
1390                                sector_t start_cache_sec = cache_sector(wc, e);
1391                                sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
1392
1393                                while (bio_size < bio->bi_iter.bi_size) {
1394                                        struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
1395                                        if (!f)
1396                                                break;
1397                                        write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
1398                                                                        (bio_size >> SECTOR_SHIFT), wc->seq_count);
1399                                        writecache_insert_entry(wc, f);
1400                                        wc->uncommitted_blocks++;
1401                                        bio_size += wc->block_size;
1402                                        current_cache_sec += wc->block_size >> SECTOR_SHIFT;
1403                                }
1404
1405                                bio_set_dev(bio, wc->ssd_dev->bdev);
1406                                bio->bi_iter.bi_sector = start_cache_sec;
1407                                dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
1408
1409                                if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
1410                                        wc->uncommitted_blocks = 0;
1411                                        queue_work(wc->writeback_wq, &wc->flush_work);
1412                                } else {
1413                                        writecache_schedule_autocommit(wc);
1414                                }
1415                                goto unlock_remap;
1416                        }
1417                } while (bio->bi_iter.bi_size);
1418
1419                if (unlikely(bio->bi_opf & REQ_FUA ||
1420                             wc->uncommitted_blocks >= wc->autocommit_blocks))
1421                        writecache_flush(wc);
1422                else
1423                        writecache_schedule_autocommit(wc);
1424                goto unlock_submit;
1425        }
1426
1427unlock_remap_origin:
1428        bio_set_dev(bio, wc->dev->bdev);
1429        wc_unlock(wc);
1430        return DM_MAPIO_REMAPPED;
1431
1432unlock_remap:
1433        /* make sure that writecache_end_io decrements bio_in_progress: */
1434        bio->bi_private = (void *)1;
1435        atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
1436        wc_unlock(wc);
1437        return DM_MAPIO_REMAPPED;
1438
1439unlock_submit:
1440        wc_unlock(wc);
1441        bio_endio(bio);
1442        return DM_MAPIO_SUBMITTED;
1443
1444unlock_return:
1445        wc_unlock(wc);
1446        return DM_MAPIO_SUBMITTED;
1447
1448unlock_error:
1449        wc_unlock(wc);
1450        bio_io_error(bio);
1451        return DM_MAPIO_SUBMITTED;
1452}
1453
1454static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
1455{
1456        struct dm_writecache *wc = ti->private;
1457
1458        if (bio->bi_private != NULL) {
1459                int dir = bio_data_dir(bio);
1460                if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
1461                        if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
1462                                wake_up(&wc->bio_in_progress_wait[dir]);
1463        }
1464        return 0;
1465}
1466
1467static int writecache_iterate_devices(struct dm_target *ti,
1468                                      iterate_devices_callout_fn fn, void *data)
1469{
1470        struct dm_writecache *wc = ti->private;
1471
1472        return fn(ti, wc->dev, 0, ti->len, data);
1473}
1474
1475static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
1476{
1477        struct dm_writecache *wc = ti->private;
1478
1479        if (limits->logical_block_size < wc->block_size)
1480                limits->logical_block_size = wc->block_size;
1481
1482        if (limits->physical_block_size < wc->block_size)
1483                limits->physical_block_size = wc->block_size;
1484
1485        if (limits->io_min < wc->block_size)
1486                limits->io_min = wc->block_size;
1487}
1488
1489
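    /*
     * Writeback completions run in interrupt context, so both the pmem
     * (bio) and SSD (kcopyd) paths merely queue the finished item on
     * wc->endio_list and wake the endio thread, which does the real work
     * under wc->lock.
     */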
1490static void writecache_writeback_endio(struct bio *bio)
1491{
1492        struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
1493        struct dm_writecache *wc = wb->wc;
1494        unsigned long flags;
1495
1496        raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
1497        if (unlikely(list_empty(&wc->endio_list)))
1498                wake_up_process(wc->endio_thread);
1499        list_add_tail(&wb->endio_entry, &wc->endio_list);
1500        raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
1501}
1502
1503static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
1504{
1505        struct copy_struct *c = ptr;
1506        struct dm_writecache *wc = c->wc;
1507
1508        c->error = likely(!(read_err | write_err)) ? 0 : -EIO;
1509
1510        raw_spin_lock_irq(&wc->endio_list_lock);
1511        if (unlikely(list_empty(&wc->endio_list)))
1512                wake_up_process(wc->endio_thread);
1513        list_add_tail(&c->endio_entry, &wc->endio_list);
1514        raw_spin_unlock_irq(&wc->endio_list_lock);
1515}
1516
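    /*
     * Retire completed pmem writeback bios: clear write_in_progress and
     * free each entry, committing and briefly dropping wc->lock every
     * ENDIO_LATENCY entries so other work is not starved.
     */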
1517static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
1518{
1519        unsigned i;
1520        struct writeback_struct *wb;
1521        struct wc_entry *e;
1522        unsigned long n_walked = 0;
1523
1524        do {
1525                wb = list_entry(list->next, struct writeback_struct, endio_entry);
1526                list_del(&wb->endio_entry);
1527
1528                if (unlikely(wb->bio.bi_status != BLK_STS_OK))
1529                        writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
1530                                        "write error %d", wb->bio.bi_status);
1531                i = 0;
1532                do {
1533                        e = wb->wc_list[i];
1534                        BUG_ON(!e->write_in_progress);
1535                        e->write_in_progress = false;
1536                        INIT_LIST_HEAD(&e->lru);
1537                        if (!writecache_has_error(wc))
1538                                writecache_free_entry(wc, e);
1539                        BUG_ON(!wc->writeback_size);
1540                        wc->writeback_size--;
1541                        n_walked++;
1542                        if (unlikely(n_walked >= ENDIO_LATENCY)) {
1543                                writecache_commit_flushed(wc, false);
1544                                wc_unlock(wc);
1545                                wc_lock(wc);
1546                                n_walked = 0;
1547                        }
1548                } while (++i < wb->wc_list_n);
1549
1550                if (wb->wc_list != wb->wc_list_inline)
1551                        kfree(wb->wc_list);
1552                bio_put(&wb->bio);
1553        } while (!list_empty(list));
1554}
1555
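    /*
     * Retire completed kcopyd copies: each copy_struct covers n_entries
     * physically contiguous cache entries, which are freed in order
     * before the struct is returned to the mempool.
     */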
1556static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
1557{
1558        struct copy_struct *c;
1559        struct wc_entry *e;
1560
1561        do {
1562                c = list_entry(list->next, struct copy_struct, endio_entry);
1563                list_del(&c->endio_entry);
1564
1565                if (unlikely(c->error))
1566                        writecache_error(wc, c->error, "copy error");
1567
1568                e = c->e;
1569                do {
1570                        BUG_ON(!e->write_in_progress);
1571                        e->write_in_progress = false;
1572                        INIT_LIST_HEAD(&e->lru);
1573                        if (!writecache_has_error(wc))
1574                                writecache_free_entry(wc, e);
1575
1576                        BUG_ON(!wc->writeback_size);
1577                        wc->writeback_size--;
1578                        e++;
1579                } while (--c->n_entries);
1580                mempool_free(c, &wc->copy_pool);
1581        } while (!list_empty(list));
1582}
1583
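    /*
     * Dedicated thread that drains wc->endio_list. It sleeps while the
     * list is empty, flushes the origin device first unless FUA
     * writeback is in use, and then processes the completions under
     * wc->lock.
     */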
1584static int writecache_endio_thread(void *data)
1585{
1586        struct dm_writecache *wc = data;
1587
1588        while (1) {
1589                struct list_head list;
1590
1591                raw_spin_lock_irq(&wc->endio_list_lock);
1592                if (!list_empty(&wc->endio_list))
1593                        goto pop_from_list;
1594                set_current_state(TASK_INTERRUPTIBLE);
1595                raw_spin_unlock_irq(&wc->endio_list_lock);
1596
1597                if (unlikely(kthread_should_stop())) {
1598                        set_current_state(TASK_RUNNING);
1599                        break;
1600                }
1601
1602                schedule();
1603
1604                continue;
1605
1606pop_from_list:
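                    /*
                     * Steal the whole list under the lock: copy the head,
                     * repoint the first and last entries at the local copy,
                     * then reinitialize wc->endio_list to empty.
                     */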
1607                list = wc->endio_list;
1608                list.next->prev = list.prev->next = &list;
1609                INIT_LIST_HEAD(&wc->endio_list);
1610                raw_spin_unlock_irq(&wc->endio_list_lock);
1611
1612                if (!WC_MODE_FUA(wc))
1613                        writecache_disk_flush(wc, wc->dev);
1614
1615                wc_lock(wc);
1616
1617                if (WC_MODE_PMEM(wc)) {
1618                        __writecache_endio_pmem(wc, &list);
1619                } else {
1620                        __writecache_endio_ssd(wc, &list);
1621                        writecache_wait_for_ios(wc, READ);
1622                }
1623
1624                writecache_commit_flushed(wc, false);
1625
1626                wc_unlock(wc);
1627        }
1628
1629        return 0;
1630}
1631
1632static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
1633{
1634        struct dm_writecache *wc = wb->wc;
1635        unsigned block_size = wc->block_size;
1636        void *address = memory_data(wc, e);
1637
1638        persistent_memory_flush_cache(address, block_size);
1639        return bio_add_page(&wb->bio, persistent_memory_page(address),
1640                            block_size, persistent_memory_page_offset(address)) != 0;
1641}
1642
1643struct writeback_list {
1644        struct list_head list;
1645        size_t size;
1646};
1647
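    /*
     * If max_writeback_jobs is set, block here whenever the number of
     * submitted-but-incomplete writeback blocks (writeback_size minus
     * the batch still being built in wbl) reaches the limit, until some
     * writeback completes.
     */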
1648static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
1649{
1650        if (unlikely(wc->max_writeback_jobs)) {
1651                if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
1652                        wc_lock(wc);
1653                        while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
1654                                writecache_wait_on_freelist(wc);
1655                        wc_unlock(wc);
1656                }
1657        }
1658        cond_resched();
1659}
1660
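    /*
     * Pmem writeback: pop entries from the tail of wbl and build one bio
     * per run of entries whose original sectors are consecutive, up to
     * wc_list_contiguous pages per bio. wc_add_block flushes the CPU
     * cache for each block before its page is attached to the bio.
     */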
1661static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
1662{
1663        struct wc_entry *e, *f;
1664        struct bio *bio;
1665        struct writeback_struct *wb;
1666        unsigned max_pages;
1667
1668        while (wbl->size) {
1669                wbl->size--;
1670                e = container_of(wbl->list.prev, struct wc_entry, lru);
1671                list_del(&e->lru);
1672
1673                max_pages = e->wc_list_contiguous;
1674
1675                bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
1676                wb = container_of(bio, struct writeback_struct, bio);
1677                wb->wc = wc;
1678                bio->bi_end_io = writecache_writeback_endio;
1679                bio_set_dev(bio, wc->dev->bdev);
1680                bio->bi_iter.bi_sector = read_original_sector(wc, e);
1681                if (max_pages <= WB_LIST_INLINE ||
1682                    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
1683                                                           GFP_NOIO | __GFP_NORETRY |
1684                                                           __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
1685                        wb->wc_list = wb->wc_list_inline;
1686                        max_pages = WB_LIST_INLINE;
1687                }
1688
1689                BUG_ON(!wc_add_block(wb, e, GFP_NOIO));
1690
1691                wb->wc_list[0] = e;
1692                wb->wc_list_n = 1;
1693
1694                while (wbl->size && wb->wc_list_n < max_pages) {
1695                        f = container_of(wbl->list.prev, struct wc_entry, lru);
1696                        if (read_original_sector(wc, f) !=
1697                            read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
1698                                break;
1699                        if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
1700                                break;
1701                        wbl->size--;
1702                        list_del(&f->lru);
1703                        wb->wc_list[wb->wc_list_n++] = f;
1704                        e = f;
1705                }
1706                bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
1707                if (writecache_has_error(wc)) {
1708                        bio->bi_status = BLK_STS_IOERR;
1709                        bio_endio(bio);
1710                } else {
1711                        submit_bio(bio);
1712                }
1713
1714                __writeback_throttle(wc, wbl);
1715        }
1716}
1717
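    /*
     * SSD writeback: contiguous cache entries map to contiguous sectors
     * on the cache device, so each run is handed to dm-kcopyd as a
     * single copy from the SSD back to the origin.
     */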
1718static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
1719{
1720        struct wc_entry *e, *f;
1721        struct dm_io_region from, to;
1722        struct copy_struct *c;
1723
1724        while (wbl->size) {
1725                unsigned n_sectors;
1726
1727                wbl->size--;
1728                e = container_of(wbl->list.prev, struct wc_entry, lru);
1729                list_del(&e->lru);
1730
1731                n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);
1732
1733                from.bdev = wc->ssd_dev->bdev;
1734                from.sector = cache_sector(wc, e);
1735                from.count = n_sectors;
1736                to.bdev = wc->dev->bdev;
1737                to.sector = read_original_sector(wc, e);
1738                to.count = n_sectors;
1739
1740                c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
1741                c->wc = wc;
1742                c->e = e;
1743                c->n_entries = e->wc_list_contiguous;
1744
1745                while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
1746                        wbl->size--;
1747                        f = container_of(wbl->list.prev, struct wc_entry, lru);
1748                        BUG_ON(f != e + 1);
1749                        list_del(&f->lru);
1750                        e = f;
1751                }
1752
1753                dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
1754
1755                __writeback_throttle(wc, wbl);
1756        }
1757}
1758
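    /*
     * Writeback worker. Entries are taken from the cold end of the LRU
     * while the free plus under-writeback blocks are at or below the low
     * watermark, the oldest entry is approaching max_age, or full
     * writeback was requested (writeback_all). Each selected entry is
     * extended with rb-tree neighbours whose original sectors are
     * consecutive, so writeback proceeds in large sequential chunks.
     */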
1759static void writecache_writeback(struct work_struct *work)
1760{
1761        struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
1762        struct blk_plug plug;
1763        struct wc_entry *f, *g, *e = NULL;
1764        struct rb_node *node, *next_node;
1765        struct list_head skipped;
1766        struct writeback_list wbl;
1767        unsigned long n_walked;
1768
1769        wc_lock(wc);
1770restart:
1771        if (writecache_has_error(wc)) {
1772                wc_unlock(wc);
1773                return;
1774        }
1775
1776        if (unlikely(wc->writeback_all)) {
1777                if (writecache_wait_for_writeback(wc))
1778                        goto restart;
1779        }
1780
1781        if (wc->overwrote_committed) {
1782                writecache_wait_for_ios(wc, WRITE);
1783        }
1784
1785        n_walked = 0;
1786        INIT_LIST_HEAD(&skipped);
1787        INIT_LIST_HEAD(&wbl.list);
1788        wbl.size = 0;
1789        while (!list_empty(&wc->lru) &&
1790               (wc->writeback_all ||
1791                wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
1792                (jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
1793                 wc->max_age - wc->max_age / MAX_AGE_DIV))) {
1794
1795                n_walked++;
1796                if (unlikely(n_walked > WRITEBACK_LATENCY) &&
1797                    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
1798                        queue_work(wc->writeback_wq, &wc->writeback_work);
1799                        break;
1800                }
1801
1802                if (unlikely(wc->writeback_all)) {
1803                        if (unlikely(!e)) {
1804                                writecache_flush(wc);
1805                                e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
1806                        } else
1807                                e = g;
1808                } else
1809                        e = container_of(wc->lru.prev, struct wc_entry, lru);
1810                BUG_ON(e->write_in_progress);
1811                if (unlikely(!writecache_entry_is_committed(wc, e))) {
1812                        writecache_flush(wc);
1813                }
1814                node = rb_prev(&e->rb_node);
1815                if (node) {
1816                        f = container_of(node, struct wc_entry, rb_node);
1817                        if (unlikely(read_original_sector(wc, f) ==
1818                                     read_original_sector(wc, e))) {
1819                                BUG_ON(!f->write_in_progress);
1820                                list_del(&e->lru);
1821                                list_add(&e->lru, &skipped);
1822                                cond_resched();
1823                                continue;
1824                        }
1825                }
1826                wc->writeback_size++;
1827                list_del(&e->lru);
1828                list_add(&e->lru, &wbl.list);
1829                wbl.size++;
1830                e->write_in_progress = true;
1831                e->wc_list_contiguous = 1;
1832
1833                f = e;
1834
1835                while (1) {
1836                        next_node = rb_next(&f->rb_node);
1837                        if (unlikely(!next_node))
1838                                break;
1839                        g = container_of(next_node, struct wc_entry, rb_node);
1840                        if (unlikely(read_original_sector(wc, g) ==
1841                            read_original_sector(wc, f))) {
1842                                f = g;
1843                                continue;
1844                        }
1845                        if (read_original_sector(wc, g) !=
1846                            read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
1847                                break;
1848                        if (unlikely(g->write_in_progress))
1849                                break;
1850                        if (unlikely(!writecache_entry_is_committed(wc, g)))
1851                                break;
1852
1853                        if (!WC_MODE_PMEM(wc)) {
1854                                if (g != f + 1)
1855                                        break;
1856                        }
1857
1858                        n_walked++;
1859                        //if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
1860                        //      break;
1861
1862                        wc->writeback_size++;
1863                        list_del(&g->lru);
1864                        list_add(&g->lru, &wbl.list);
1865                        wbl.size++;
1866                        g->write_in_progress = true;
1867                        g->wc_list_contiguous = BIO_MAX_PAGES;
1868                        f = g;
1869                        e->wc_list_contiguous++;
1870                        if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
1871                                if (unlikely(wc->writeback_all)) {
1872                                        next_node = rb_next(&f->rb_node);
1873                                        if (likely(next_node))
1874                                                g = container_of(next_node, struct wc_entry, rb_node);
1875                                }
1876                                break;
1877                        }
1878                }
1879                cond_resched();
1880        }
1881
1882        if (!list_empty(&skipped)) {
1883                list_splice_tail(&skipped, &wc->lru);
1884                /*
1885                 * If we made no progress, we must wait until some
1886                 * writeback finishes to avoid burning CPU in a loop
1887                 */
1888                if (unlikely(!wbl.size))
1889                        writecache_wait_for_writeback(wc);
1890        }
1891
1892        wc_unlock(wc);
1893
1894        blk_start_plug(&plug);
1895
1896        if (WC_MODE_PMEM(wc))
1897                __writecache_writeback_pmem(wc, &wbl);
1898        else
1899                __writecache_writeback_ssd(wc, &wbl);
1900
1901        blk_finish_plug(&plug);
1902
1903        if (unlikely(wc->writeback_all)) {
1904                wc_lock(wc);
1905                while (writecache_wait_for_writeback(wc));
1906                wc_unlock(wc);
1907        }
1908}
1909
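    /*
     * Split the cache device into a metadata area (superblock plus one
     * wc_memory_entry per block) and a data area. The first estimate is
     * device_size / (block_size + sizeof(struct wc_memory_entry));
     * n_blocks is then decremented until the block-aligned metadata plus
     * n_blocks data blocks actually fit in device_size.
     */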
1910static int calculate_memory_size(uint64_t device_size, unsigned block_size,
1911                                 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
1912{
1913        uint64_t n_blocks, offset;
1914        struct wc_entry e;
1915
1916        n_blocks = device_size;
1917        do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));
1918
1919        while (1) {
1920                if (!n_blocks)
1921                        return -ENOSPC;
1922                /* Verify that the offset of entries[n_blocks] won't overflow size_t */
1923                if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
1924                                 sizeof(struct wc_memory_entry)))
1925                        return -EFBIG;
1926                offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
1927                offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
1928                if (offset + n_blocks * block_size <= device_size)
1929                        break;
1930                n_blocks--;
1931        }
1932
1933        /* check if the bit field overflows */
1934        e.index = n_blocks;
1935        if (e.index != n_blocks)
1936                return -EFBIG;
1937
1938        if (n_blocks_p)
1939                *n_blocks_p = n_blocks;
1940        if (n_metadata_blocks_p)
1941                *n_metadata_blocks_p = offset >> __ffs(block_size);
1942        return 0;
1943}
1944
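    /*
     * Format a fresh cache: zero the superblock fields, mark every entry
     * empty and flush everything before the magic number is written.
     * Writing the magic last makes an interrupted initialization
     * detectable: the superblock stays invalid until the format is
     * complete.
     */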
1945static int init_memory(struct dm_writecache *wc)
1946{
1947        size_t b;
1948        int r;
1949
1950        r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
1951        if (r)
1952                return r;
1953
1954        r = writecache_alloc_entries(wc);
1955        if (r)
1956                return r;
1957
1958        for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
1959                pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
1960        pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
1961        pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
1962        pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
1963        pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
1964
1965        for (b = 0; b < wc->n_blocks; b++) {
1966                write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
1967                cond_resched();
1968        }
1969
1970        writecache_flush_all_metadata(wc);
1971        writecache_commit_flushed(wc, false);
1972        pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
1973        writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
1974        writecache_commit_flushed(wc, false);
1975
1976        return 0;
1977}
1978
1979static void writecache_dtr(struct dm_target *ti)
1980{
1981        struct dm_writecache *wc = ti->private;
1982
1983        if (!wc)
1984                return;
1985
1986        if (wc->endio_thread)
1987                kthread_stop(wc->endio_thread);
1988
1989        if (wc->flush_thread)
1990                kthread_stop(wc->flush_thread);
1991
1992        bioset_exit(&wc->bio_set);
1993
1994        mempool_exit(&wc->copy_pool);
1995
1996        if (wc->writeback_wq)
1997                destroy_workqueue(wc->writeback_wq);
1998
1999        if (wc->dev)
2000                dm_put_device(ti, wc->dev);
2001
2002        if (wc->ssd_dev)
2003                dm_put_device(ti, wc->ssd_dev);
2004
2005        if (wc->entries)
2006                vfree(wc->entries);
2007
2008        if (wc->memory_map) {
2009                if (WC_MODE_PMEM(wc))
2010                        persistent_memory_release(wc);
2011                else
2012                        vfree(wc->memory_map);
2013        }
2014
2015        if (wc->dm_kcopyd)
2016                dm_kcopyd_client_destroy(wc->dm_kcopyd);
2017
2018        if (wc->dm_io)
2019                dm_io_client_destroy(wc->dm_io);
2020
2021        if (wc->dirty_bitmap)
2022                vfree(wc->dirty_bitmap);
2023
2024        kfree(wc);
2025}
2026
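    /*
     * Construct a writecache target. The table line is
     *   <p|s> <origin dev> <cache dev> <block size> <#opt args> [args...]
     * For example (illustrative only; device names and the watermark
     * value are placeholders):
     *   dmsetup create wc --table "0 $(blockdev --getsz /dev/org) writecache s /dev/org /dev/cache 4096 2 high_watermark 60"
     */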
2027static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2028{
2029        struct dm_writecache *wc;
2030        struct dm_arg_set as;
2031        const char *string;
2032        unsigned opt_params;
2033        size_t offset, data_size;
2034        int i, r;
2035        char dummy;
2036        int high_wm_percent = HIGH_WATERMARK;
2037        int low_wm_percent = LOW_WATERMARK;
2038        uint64_t x;
2039        struct wc_memory_superblock s;
2040
2041        static struct dm_arg _args[] = {
2042                {0, 10, "Invalid number of feature args"},
2043        };
2044
2045        as.argc = argc;
2046        as.argv = argv;
2047
2048        wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
2049        if (!wc) {
2050                ti->error = "Cannot allocate writecache structure";
2051                r = -ENOMEM;
2052                goto bad;
2053        }
2054        ti->private = wc;
2055        wc->ti = ti;
2056
2057        mutex_init(&wc->lock);
2058        wc->max_age = MAX_AGE_UNSPECIFIED;
2059        writecache_poison_lists(wc);
2060        init_waitqueue_head(&wc->freelist_wait);
2061        timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
2062        timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);
2063
2064        for (i = 0; i < 2; i++) {
2065                atomic_set(&wc->bio_in_progress[i], 0);
2066                init_waitqueue_head(&wc->bio_in_progress_wait[i]);
2067        }
2068
2069        wc->dm_io = dm_io_client_create();
2070        if (IS_ERR(wc->dm_io)) {
2071                r = PTR_ERR(wc->dm_io);
2072                ti->error = "Unable to allocate dm-io client";
2073                wc->dm_io = NULL;
2074                goto bad;
2075        }
2076
2077        wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
2078        if (!wc->writeback_wq) {
2079                r = -ENOMEM;
2080                ti->error = "Could not allocate writeback workqueue";
2081                goto bad;
2082        }
2083        INIT_WORK(&wc->writeback_work, writecache_writeback);
2084        INIT_WORK(&wc->flush_work, writecache_flush_work);
2085
2086        raw_spin_lock_init(&wc->endio_list_lock);
2087        INIT_LIST_HEAD(&wc->endio_list);
2088        wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
2089        if (IS_ERR(wc->endio_thread)) {
2090                r = PTR_ERR(wc->endio_thread);
2091                wc->endio_thread = NULL;
2092                ti->error = "Couldn't spawn endio thread";
2093                goto bad;
2094        }
2095        wake_up_process(wc->endio_thread);
2096
2097        /*
2098         * Parse the mode (pmem or ssd)
2099         */
2100        string = dm_shift_arg(&as);
2101        if (!string)
2102                goto bad_arguments;
2103
2104        if (!strcasecmp(string, "s")) {
2105                wc->pmem_mode = false;
2106        } else if (!strcasecmp(string, "p")) {
2107#ifdef DM_WRITECACHE_HAS_PMEM
2108                wc->pmem_mode = true;
2109                wc->writeback_fua = true;
2110#else
2111                /*
2112                 * If the architecture doesn't support persistent memory or
2113                 * the kernel doesn't support any DAX drivers, this driver can
2114                 * only be used in SSD-only mode.
2115                 */
2116                r = -EOPNOTSUPP;
2117                ti->error = "Persistent memory or DAX not supported on this system";
2118                goto bad;
2119#endif
2120        } else {
2121                goto bad_arguments;
2122        }
2123
2124        if (WC_MODE_PMEM(wc)) {
2125                r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
2126                                offsetof(struct writeback_struct, bio),
2127                                BIOSET_NEED_BVECS);
2128                if (r) {
2129                        ti->error = "Could not allocate bio set";
2130                        goto bad;
2131                }
2132        } else {
2133                r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
2134                if (r) {
2135                        ti->error = "Could not allocate mempool";
2136                        goto bad;
2137                }
2138        }
2139
2140        /*
2141         * Parse the origin data device
2142         */
2143        string = dm_shift_arg(&as);
2144        if (!string)
2145                goto bad_arguments;
2146        r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
2147        if (r) {
2148                ti->error = "Origin data device lookup failed";
2149                goto bad;
2150        }
2151
2152        /*
2153         * Parse cache data device (be it pmem or ssd)
2154         */
2155        string = dm_shift_arg(&as);
2156        if (!string)
2157                goto bad_arguments;
2158
2159        r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
2160        if (r) {
2161                ti->error = "Cache data device lookup failed";
2162                goto bad;
2163        }
2164        wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
2165
2166        /*
2167         * Parse the cache block size
2168         */
2169        string = dm_shift_arg(&as);
2170        if (!string)
2171                goto bad_arguments;
2172        if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
2173            wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
2174            (wc->block_size & (wc->block_size - 1))) {
2175                r = -EINVAL;
2176                ti->error = "Invalid block size";
2177                goto bad;
2178        }
2179        if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
2180            wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
2181                r = -EINVAL;
2182                ti->error = "Block size is smaller than device logical block size";
2183                goto bad;
2184        }
2185        wc->block_size_bits = __ffs(wc->block_size);
2186
2187        wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
2188        wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
2189        wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
2190
2191        /*
2192         * Parse optional arguments
2193         */
2194        r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2195        if (r)
2196                goto bad;
2197
2198        while (opt_params) {
2199                string = dm_shift_arg(&as), opt_params--;
2200                if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
2201                        unsigned long long start_sector;
2202                        string = dm_shift_arg(&as), opt_params--;
2203                        if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
2204                                goto invalid_optional;
2205                        wc->start_sector = start_sector;
2206                        if (wc->start_sector != start_sector ||
2207                            wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
2208                                goto invalid_optional;
2209                } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
2210                        string = dm_shift_arg(&as), opt_params--;
2211                        if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
2212                                goto invalid_optional;
2213                        if (high_wm_percent < 0 || high_wm_percent > 100)
2214                                goto invalid_optional;
2215                        wc->high_wm_percent_set = true;
2216                } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
2217                        string = dm_shift_arg(&as), opt_params--;
2218                        if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
2219                                goto invalid_optional;
2220                        if (low_wm_percent < 0 || low_wm_percent > 100)
2221                                goto invalid_optional;
2222                        wc->low_wm_percent_set = true;
2223                } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
2224                        string = dm_shift_arg(&as), opt_params--;
2225                        if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
2226                                goto invalid_optional;
2227                        wc->max_writeback_jobs_set = true;
2228                } else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
2229                        string = dm_shift_arg(&as), opt_params--;
2230                        if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
2231                                goto invalid_optional;
2232                        wc->autocommit_blocks_set = true;
2233                } else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
2234                        unsigned autocommit_msecs;
2235                        string = dm_shift_arg(&as), opt_params--;
2236                        if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
2237                                goto invalid_optional;
2238                        if (autocommit_msecs > 3600000)
2239                                goto invalid_optional;
2240                        wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
2241                        wc->autocommit_time_set = true;
2242                } else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
2243                        unsigned max_age_msecs;
2244                        string = dm_shift_arg(&as), opt_params--;
2245                        if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
2246                                goto invalid_optional;
2247                        if (max_age_msecs > 86400000)
2248                                goto invalid_optional;
2249                        wc->max_age = msecs_to_jiffies(max_age_msecs);
2250                } else if (!strcasecmp(string, "cleaner")) {
2251                        wc->cleaner = true;
2252                } else if (!strcasecmp(string, "fua")) {
2253                        if (WC_MODE_PMEM(wc)) {
2254                                wc->writeback_fua = true;
2255                                wc->writeback_fua_set = true;
2256                        } else goto invalid_optional;
2257                } else if (!strcasecmp(string, "nofua")) {
2258                        if (WC_MODE_PMEM(wc)) {
2259                                wc->writeback_fua = false;
2260                                wc->writeback_fua_set = true;
2261                        } else goto invalid_optional;
2262                } else {
2263invalid_optional:
2264                        r = -EINVAL;
2265                        ti->error = "Invalid optional argument";
2266                        goto bad;
2267                }
2268        }
2269
2270        if (high_wm_percent < low_wm_percent) {
2271                r = -EINVAL;
2272                ti->error = "High watermark must be greater than or equal to low watermark";
2273                goto bad;
2274        }
2275
2276        if (WC_MODE_PMEM(wc)) {
2277                if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
2278                        r = -EOPNOTSUPP;
2279                        ti->error = "Asynchronous persistent memory not supported as pmem cache";
2280                        goto bad;
2281                }
2282
2283                r = persistent_memory_claim(wc);
2284                if (r) {
2285                        ti->error = "Unable to map persistent memory for cache";
2286                        goto bad;
2287                }
2288        } else {
2289                size_t n_blocks, n_metadata_blocks;
2290                uint64_t n_bitmap_bits;
2291
2292                wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
2293
2294                bio_list_init(&wc->flush_list);
2295                wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
2296                if (IS_ERR(wc->flush_thread)) {
2297                        r = PTR_ERR(wc->flush_thread);
2298                        wc->flush_thread = NULL;
2299                        ti->error = "Couldn't spawn flush thread";
2300                        goto bad;
2301                }
2302                wake_up_process(wc->flush_thread);
2303
2304                r = calculate_memory_size(wc->memory_map_size, wc->block_size,
2305                                          &n_blocks, &n_metadata_blocks);
2306                if (r) {
2307                        ti->error = "Invalid device size";
2308                        goto bad;
2309                }
2310
2311                n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
2312                                 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
2313                /* this is a limitation of the test_bit functions */
2314                if (n_bitmap_bits > 1U << 31) {
2315                        r = -EFBIG;
2316                        ti->error = "Invalid device size";
2317                        goto bad;
2318                }
2319
2320                wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
2321                if (!wc->memory_map) {
2322                        r = -ENOMEM;
2323                        ti->error = "Unable to allocate memory for metadata";
2324                        goto bad;
2325                }
2326
2327                wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2328                if (IS_ERR(wc->dm_kcopyd)) {
2329                        r = PTR_ERR(wc->dm_kcopyd);
2330                        ti->error = "Unable to allocate dm-kcopyd client";
2331                        wc->dm_kcopyd = NULL;
2332                        goto bad;
2333                }
2334
2335                wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
2336                wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
2337                        BITS_PER_LONG * sizeof(unsigned long);
2338                wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
2339                if (!wc->dirty_bitmap) {
2340                        r = -ENOMEM;
2341                        ti->error = "Unable to allocate dirty bitmap";
2342                        goto bad;
2343                }
2344
2345                r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
2346                if (r) {
2347                        ti->error = "Unable to read first block of metadata";
2348                        goto bad;
2349                }
2350        }
2351
2352        r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
2353        if (r) {
2354                ti->error = "Hardware memory error when reading superblock";
2355                goto bad;
2356        }
2357        if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
2358                r = init_memory(wc);
2359                if (r) {
2360                        ti->error = "Unable to initialize device";
2361                        goto bad;
2362                }
2363                r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
2364                if (r) {
2365                        ti->error = "Hardware memory error when reading superblock";
2366                        goto bad;
2367                }
2368        }
2369
2370        if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
2371                ti->error = "Invalid magic in the superblock";
2372                r = -EINVAL;
2373                goto bad;
2374        }
2375
2376        if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
2377                ti->error = "Invalid version in the superblock";
2378                r = -EINVAL;
2379                goto bad;
2380        }
2381
2382        if (le32_to_cpu(s.block_size) != wc->block_size) {
2383                ti->error = "Block size does not match superblock";
2384                r = -EINVAL;
2385                goto bad;
2386        }
2387
2388        wc->n_blocks = le64_to_cpu(s.n_blocks);
2389
2390        offset = wc->n_blocks * sizeof(struct wc_memory_entry);
2391        if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
2392overflow:
2393                ti->error = "Overflow in size calculation";
2394                r = -EINVAL;
2395                goto bad;
2396        }
2397        offset += sizeof(struct wc_memory_superblock);
2398        if (offset < sizeof(struct wc_memory_superblock))
2399                goto overflow;
2400        offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
2401        data_size = wc->n_blocks * (size_t)wc->block_size;
2402        if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
2403            (offset + data_size < offset))
2404                goto overflow;
2405        if (offset + data_size > wc->memory_map_size) {
2406                ti->error = "Memory area is too small";
2407                r = -EINVAL;
2408                goto bad;
2409        }
2410
2411        wc->metadata_sectors = offset >> SECTOR_SHIFT;
2412        wc->block_start = (char *)sb(wc) + offset;
2413
2414        x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
2415        x += 50;
2416        do_div(x, 100);
2417        wc->freelist_high_watermark = x;
2418        x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
2419        x += 50;
2420        do_div(x, 100);
2421        wc->freelist_low_watermark = x;
2422
2423        if (wc->cleaner)
2424                activate_cleaner(wc);
2425
2426        r = writecache_alloc_entries(wc);
2427        if (r) {
2428                ti->error = "Cannot allocate memory";
2429                goto bad;
2430        }
2431
2432        ti->num_flush_bios = 1;
2433        ti->flush_supported = true;
2434        ti->num_discard_bios = 1;
2435
2436        if (WC_MODE_PMEM(wc))
2437                persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
2438
2439        return 0;
2440
2441bad_arguments:
2442        r = -EINVAL;
2443        ti->error = "Bad arguments";
2444bad:
2445        writecache_dtr(ti);
2446        return r;
2447}
2448
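    /*
     * Status reporting. STATUSTYPE_INFO emits
     *   <error> <n_blocks> <free blocks> <blocks under writeback>
     * e.g. "0 262144 250000 128" (illustrative numbers), while
     * STATUSTYPE_TABLE reconstructs the constructor arguments, including
     * only the optional parameters that were explicitly set.
     */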
2449static void writecache_status(struct dm_target *ti, status_type_t type,
2450                              unsigned status_flags, char *result, unsigned maxlen)
2451{
2452        struct dm_writecache *wc = ti->private;
2453        unsigned extra_args;
2454        unsigned sz = 0;
2455        uint64_t x;
2456
2457        switch (type) {
2458        case STATUSTYPE_INFO:
2459                DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
2460                       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
2461                       (unsigned long long)wc->writeback_size);
2462                break;
2463        case STATUSTYPE_TABLE:
2464                DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
2465                                wc->dev->name, wc->ssd_dev->name, wc->block_size);
2466                extra_args = 0;
2467                if (wc->start_sector)
2468                        extra_args += 2;
2469                if (wc->high_wm_percent_set && !wc->cleaner)
2470                        extra_args += 2;
2471                if (wc->low_wm_percent_set && !wc->cleaner)
2472                        extra_args += 2;
2473                if (wc->max_writeback_jobs_set)
2474                        extra_args += 2;
2475                if (wc->autocommit_blocks_set)
2476                        extra_args += 2;
2477                if (wc->autocommit_time_set)
2478                        extra_args += 2;
2479                if (wc->cleaner)
2480                        extra_args++;
2481                if (wc->writeback_fua_set)
2482                        extra_args++;
2483
2484                DMEMIT("%u", extra_args);
2485                if (wc->start_sector)
2486                        DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
2487                if (wc->high_wm_percent_set && !wc->cleaner) {
2488                        x = (uint64_t)wc->freelist_high_watermark * 100;
2489                        x += wc->n_blocks / 2;
2490                        do_div(x, (size_t)wc->n_blocks);
2491                        DMEMIT(" high_watermark %u", 100 - (unsigned)x);
2492                }
2493                if (wc->low_wm_percent_set && !wc->cleaner) {
2494                        x = (uint64_t)wc->freelist_low_watermark * 100;
2495                        x += wc->n_blocks / 2;
2496                        do_div(x, (size_t)wc->n_blocks);
2497                        DMEMIT(" low_watermark %u", 100 - (unsigned)x);
2498                }
2499                if (wc->max_writeback_jobs_set)
2500                        DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
2501                if (wc->autocommit_blocks_set)
2502                        DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
2503                if (wc->autocommit_time_set)
2504                        DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
2505                if (wc->max_age != MAX_AGE_UNSPECIFIED)
2506                        DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
2507                if (wc->cleaner)
2508                        DMEMIT(" cleaner");
2509                if (wc->writeback_fua_set)
2510                        DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
2511                break;
2512        }
2513}
2514
2515static struct target_type writecache_target = {
2516        .name                   = "writecache",
2517        .version                = {1, 3, 0},
2518        .module                 = THIS_MODULE,
2519        .ctr                    = writecache_ctr,
2520        .dtr                    = writecache_dtr,
2521        .status                 = writecache_status,
2522        .postsuspend            = writecache_suspend,
2523        .resume                 = writecache_resume,
2524        .message                = writecache_message,
2525        .map                    = writecache_map,
2526        .end_io                 = writecache_end_io,
2527        .iterate_devices        = writecache_iterate_devices,
2528        .io_hints               = writecache_io_hints,
2529};
2530
2531static int __init dm_writecache_init(void)
2532{
2533        int r;
2534
2535        r = dm_register_target(&writecache_target);
2536        if (r < 0) {
2537                DMERR("register failed %d", r);
2538                return r;
2539        }
2540
2541        return 0;
2542}
2543
2544static void __exit dm_writecache_exit(void)
2545{
2546        dm_unregister_target(&writecache_target);
2547}
2548
2549module_init(dm_writecache_init);
2550module_exit(dm_writecache_exit);
2551
2552MODULE_DESCRIPTION(DM_NAME " writecache target");
2553MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2554MODULE_LICENSE("GPL");
2555