linux/drivers/md/dm-integrity.c
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS      32768
#define DEFAULT_JOURNAL_SIZE_FACTOR     7
#define DEFAULT_SECTORS_PER_BITMAP_BIT  32768
#define DEFAULT_BUFFER_SECTORS          128
#define DEFAULT_JOURNAL_WATERMARK       50
#define DEFAULT_SYNC_MSEC               10000
#define DEFAULT_MAX_JOURNAL_SECTORS     131072
#define MIN_LOG2_INTERLEAVE_SECTORS     3
#define MAX_LOG2_INTERLEAVE_SECTORS     31
#define METADATA_WORKQUEUE_MAX_ACTIVE   16
#define RECALC_SECTORS                  8192
#define RECALC_WRITE_SUPER              16
#define BITMAP_BLOCK_SIZE               4096    /* don't change it */
#define BITMAP_FLUSH_INTERVAL           (10 * HZ)

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC                        "integrt"
#define SB_VERSION_1                    1
#define SB_VERSION_2                    2
#define SB_VERSION_3                    3
#define SB_SECTORS                      8
#define MAX_SECTORS_PER_BLOCK           8

struct superblock {
        __u8 magic[8];
        __u8 version;
        __u8 log2_interleave_sectors;
        __u16 integrity_tag_size;
        __u32 journal_sections;
        __u64 provided_data_sectors;    /* userspace uses this value */
        __u32 flags;
        __u8 log2_sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;
        __u8 pad[2];
        __u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC        0x1
#define SB_FLAG_RECALCULATING           0x2
#define SB_FLAG_DIRTY_BITMAP            0x4

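/*
 * Hedged sketch (added commentary, not part of the upstream file): before
 * trusting any other superblock field, a loader would validate the magic
 * and version roughly like this. example_superblock_looks_valid() is a
 * hypothetical helper, not a function this driver defines.
 */
static inline bool example_superblock_looks_valid(struct superblock *sb)
{
        /* the magic is the 8-byte string "integrt" (including the NUL) */
        return !memcmp(sb->magic, SB_MAGIC, 8) &&
               sb->version >= SB_VERSION_1 && sb->version <= SB_VERSION_3;
}
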
#define JOURNAL_ENTRY_ROUNDUP           8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR          8

struct journal_entry {
        union {
                struct {
                        __u32 sector_lo;
                        __u32 sector_hi;
                } s;
                __u64 sector;
        } u;
        commit_id_t last_bytes[0];
        /* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)               ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)         do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)         do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)            le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)             ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)            do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)         ((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)        do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

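/*
 * Illustrative sketch (added commentary, not upstream code): an entry's
 * state is encoded in the high word of its sector field -- 0xffffffff
 * means unused, 0xfffffffe means a write is in progress, and any other
 * value is the high half of a committed target sector.
 * example_journal_entry_is_committed() is a hypothetical helper.
 */
static inline bool example_journal_entry_is_committed(struct journal_entry *je)
{
        return !journal_entry_is_unused(je) && !journal_entry_is_inprogress(je);
}
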
#define JOURNAL_BLOCK_SECTORS           8
#define JOURNAL_SECTOR_DATA             ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE                (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
        __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
        __u8 mac[JOURNAL_MAC_PER_SECTOR];
        commit_id_t commit_id;
};

#define MAX_TAG_SIZE                    (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

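/*
 * Worked example (added commentary): with 512-byte sectors,
 * JOURNAL_SECTOR_DATA is 512 - 8 = 504 bytes. Subtracting the 8 per-sector
 * MAC bytes and the journal_entry header for MAX_SECTORS_PER_BLOCK == 8
 * (an 8-byte sector field plus 8 * 8-byte last_bytes entries, 72 bytes
 * total) leaves MAX_TAG_SIZE == 504 - 8 - 72 == 424 bytes per block.
 */
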
#define METADATA_PADDING_SECTORS        8

#define N_COMMIT_IDS                    4

static unsigned char prev_commit_seq(unsigned char seq)
{
        return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
        return (seq + 1) % N_COMMIT_IDS;
}

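/*
 * Worked example (added commentary): with N_COMMIT_IDS == 4 the sequence
 * number cycles 0 -> 1 -> 2 -> 3 -> 0, so next_commit_seq(3) == 0 and
 * prev_commit_seq(0) == (0 + 4 - 1) % 4 == 3; adding N_COMMIT_IDS before
 * taking the modulo keeps the unsigned arithmetic from wrapping.
 */
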
/*
 * In-memory structures
 */

struct journal_node {
        struct rb_node node;
        sector_t sector;
};

struct alg_spec {
        char *alg_string;
        char *key_string;
        __u8 *key;
        unsigned key_size;
};

struct dm_integrity_c {
        struct dm_dev *dev;
        struct dm_dev *meta_dev;
        unsigned tag_size;
        __s8 log2_tag_size;
        sector_t start;
        mempool_t journal_io_mempool;
        struct dm_io_client *io;
        struct dm_bufio_client *bufio;
        struct workqueue_struct *metadata_wq;
        struct superblock *sb;
        unsigned journal_pages;
        unsigned n_bitmap_blocks;

        struct page_list *journal;
        struct page_list *journal_io;
        struct page_list *journal_xor;
        struct page_list *recalc_bitmap;
        struct page_list *may_write_bitmap;
        struct bitmap_block_status *bbs;
        unsigned bitmap_flush_interval;
        int synchronous_mode;
        struct bio_list synchronous_bios;
        struct delayed_work bitmap_flush_work;

        struct crypto_skcipher *journal_crypt;
        struct scatterlist **journal_scatterlist;
        struct scatterlist **journal_io_scatterlist;
        struct skcipher_request **sk_requests;

        struct crypto_shash *journal_mac;

        struct journal_node *journal_tree;
        struct rb_root journal_tree_root;

        sector_t provided_data_sectors;

        unsigned short journal_entry_size;
        unsigned char journal_entries_per_sector;
        unsigned char journal_section_entries;
        unsigned short journal_section_sectors;
        unsigned journal_sections;
        unsigned journal_entries;
        sector_t data_device_sectors;
        sector_t meta_device_sectors;
        unsigned initial_sectors;
        unsigned metadata_run;
        __s8 log2_metadata_run;
        __u8 log2_buffer_sectors;
        __u8 sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;

        unsigned char mode;
        int suspending;

        int failed;

        struct crypto_shash *internal_hash;

        /* these variables are locked with endio_wait.lock */
        struct rb_root in_progress;
        struct list_head wait_list;
        wait_queue_head_t endio_wait;
        struct workqueue_struct *wait_wq;

        unsigned char commit_seq;
        commit_id_t commit_ids[N_COMMIT_IDS];

        unsigned committed_section;
        unsigned n_committed_sections;

        unsigned uncommitted_section;
        unsigned n_uncommitted_sections;

        unsigned free_section;
        unsigned char free_section_entry;
        unsigned free_sectors;

        unsigned free_sectors_threshold;

        struct workqueue_struct *commit_wq;
        struct work_struct commit_work;

        struct workqueue_struct *writer_wq;
        struct work_struct writer_work;

        struct workqueue_struct *recalc_wq;
        struct work_struct recalc_work;
        u8 *recalc_buffer;
        u8 *recalc_tags;

        struct bio_list flush_bio_list;

        unsigned long autocommit_jiffies;
        struct timer_list autocommit_timer;
        unsigned autocommit_msec;

        wait_queue_head_t copy_to_journal_wait;

        struct completion crypto_backoff;

        bool journal_uptodate;
        bool just_formatted;
        bool recalculate_flag;

        struct alg_spec internal_hash_alg;
        struct alg_spec journal_crypt_alg;
        struct alg_spec journal_mac_alg;

        atomic64_t number_of_mismatches;

        struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
        sector_t logical_sector;
        sector_t n_sectors;
        bool waiting;
        union {
                struct rb_node node;
                struct {
                        struct task_struct *task;
                        struct list_head wait_entry;
                };
        };
};

struct dm_integrity_io {
        struct work_struct work;

        struct dm_integrity_c *ic;
        bool write;
        bool fua;

        struct dm_integrity_range range;

        sector_t metadata_block;
        unsigned metadata_offset;

        atomic_t in_flight;
        blk_status_t bi_status;

        struct completion *completion;

        struct gendisk *orig_bi_disk;
        u8 orig_bi_partno;
        bio_end_io_t *orig_bi_end_io;
        struct bio_integrity_payload *orig_bi_integrity;
        struct bvec_iter orig_bi_iter;
};

struct journal_completion {
        struct dm_integrity_c *ic;
        atomic_t in_flight;
        struct completion comp;
};

struct journal_io {
        struct dm_integrity_range range;
        struct journal_completion *comp;
};

struct bitmap_block_status {
        struct work_struct work;
        struct dm_integrity_c *ic;
        unsigned idx;
        unsigned long *bitmap;
        struct bio_list bio_queue;
        spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL      32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)     printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
        va_list args;
        va_start(args, msg);
        vprintk(msg, args);
        va_end(args);
        if (len)
                pr_cont(":");
        while (len) {
                pr_cont(" %02x", *bytes);
                bytes++;
                len--;
        }
        pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)       __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)                     do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)       do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM integrity profile; the actual protection is performed by the layer
 * above (dm-crypt), so no generate/verify callbacks are provided here.
 */
static const struct blk_integrity_profile dm_integrity_profile = {
        .name                   = "DM-DIF-EXT-TAG",
        .generate_fn            = NULL,
        .verify_fn              = NULL,
        .prepare_fn             = dm_integrity_prepare,
        .complete_fn            = dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
        if (err == -EILSEQ)
                atomic64_inc(&ic->number_of_mismatches);
        if (!cmpxchg(&ic->failed, 0, err))
                DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
        return READ_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
                                          unsigned j, unsigned char seq)
{
        /*
         * XOR the commit number with the section and sector numbers, so
         * that if a piece of the journal is written to the wrong place,
         * it is detected.
         */
        return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

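/*
 * Hedged sketch (added commentary, not upstream code): during replay the
 * stored commit_id of a journal sector can be checked against the ID
 * expected at its (section, sector) position; a sector that landed at the
 * wrong position XORs in a different (i << 32) ^ j and fails the match.
 * example_commit_id_matches() is a hypothetical helper.
 */
static inline bool example_commit_id_matches(struct dm_integrity_c *ic,
                                             commit_id_t found, unsigned i,
                                             unsigned j, unsigned char seq)
{
        return found == dm_integrity_commit_id(ic, i, j, seq);
}
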
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
                                sector_t *area, sector_t *offset)
{
        if (!ic->meta_dev) {
                __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
                *area = data_sector >> log2_interleave_sectors;
                *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
        } else {
                *area = 0;
                *offset = data_sector;
        }
}

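/*
 * Worked example (added commentary): with the default 32768-sector
 * interleave (log2_interleave_sectors == 15) and no separate metadata
 * device, data_sector 100000 falls in area 100000 >> 15 == 3 at offset
 * 100000 & 32767 == 1696.
 */
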
#define sector_to_block(ic, n)                                          \
do {                                                                    \
        BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));          \
        (n) >>= (ic)->sb->log2_sectors_per_block;                       \
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
                                            sector_t offset, unsigned *metadata_offset)
{
        __u64 ms;
        unsigned mo;

        ms = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                ms += area << ic->log2_metadata_run;
        else
                ms += area * ic->metadata_run;
        ms >>= ic->log2_buffer_sectors;

        sector_to_block(ic, offset);

        if (likely(ic->log2_tag_size >= 0)) {
                ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
                mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        } else {
                ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
                mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        }
        *metadata_offset = mo;
        return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
        sector_t result;

        if (ic->meta_dev)
                return offset;

        result = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                result += (area + 1) << ic->log2_metadata_run;
        else
                result += (area + 1) * ic->metadata_run;

        result += (sector_t)ic->initial_sectors + offset;
        result += ic->start;

        return result;
}

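/*
 * Layout note (added commentary): on a single combined device, the space
 * after ic->start + initial_sectors (superblock and journal) is a
 * repeating [metadata run | data area] pattern, so the data of area A sits
 * after A data areas and A + 1 metadata runs:
 *
 *      | sb + journal | meta 0 | data 0 | meta 1 | data 1 | ...
 */
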
static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
        if (unlikely(*sec_ptr >= ic->journal_sections))
                *sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
        if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
                ic->sb->version = SB_VERSION_3;
        else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
                ic->sb->version = SB_VERSION_2;
        else
                ic->sb->version = SB_VERSION_1;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_KMEM;
        io_req.mem.ptr.addr = ic->sb;
        io_req.notify.fn = NULL;
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start;
        io_loc.count = SB_SECTORS;

        if (op == REQ_OP_WRITE)
                sb_set_version(ic);

        return dm_io(&io_req, 1, &io_loc, NULL);
}

#define BITMAP_OP_TEST_ALL_SET          0
#define BITMAP_OP_TEST_ALL_CLEAR        1
#define BITMAP_OP_SET                   2
#define BITMAP_OP_CLEAR                 3

static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
                            sector_t sector, sector_t n_sectors, int mode)
{
        unsigned long bit, end_bit, this_end_bit, page, end_page;
        unsigned long *data;

        if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
                DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
                        (unsigned long long)sector,
                        (unsigned long long)n_sectors,
                        ic->sb->log2_sectors_per_block,
                        ic->log2_blocks_per_bitmap_bit,
                        mode);
                BUG();
        }

        if (unlikely(!n_sectors))
                return true;

        bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        end_bit = (sector + n_sectors - 1) >>
                (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

        page = bit / (PAGE_SIZE * 8);
        bit %= PAGE_SIZE * 8;

        end_page = end_bit / (PAGE_SIZE * 8);
        end_bit %= PAGE_SIZE * 8;

repeat:
        if (page < end_page) {
                this_end_bit = PAGE_SIZE * 8 - 1;
        } else {
                this_end_bit = end_bit;
        }

        data = lowmem_page_address(bitmap[page].page);

        if (mode == BITMAP_OP_TEST_ALL_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != -1)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (!test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != 0)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = -1;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __set_bit(bit, data);
                        bit++;
                }
        } else if (mode == BITMAP_OP_CLEAR) {
                if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
                        clear_page(data);
                else while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = 0;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __clear_bit(bit, data);
                        bit++;
                }
        } else {
                BUG();
        }

        if (unlikely(page < end_page)) {
                bit = 0;
                page++;
                goto repeat;
        }

        return true;
}

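/*
 * Hedged usage sketch (added commentary, not upstream code): callers pass
 * a block-aligned sector range; marking a region dirty in the may-write
 * bitmap looks roughly like this. example_mark_region_dirty() is a
 * hypothetical wrapper -- real callers also arrange for the bitmap to be
 * flushed to disk.
 */
static inline void example_mark_region_dirty(struct dm_integrity_c *ic,
                                             sector_t sector, sector_t n_sectors)
{
        block_bitmap_op(ic, ic->may_write_bitmap, sector, n_sectors, BITMAP_OP_SET);
}
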
static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
        unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
        unsigned i;

        for (i = 0; i < n_bitmap_pages; i++) {
                unsigned long *dst_data = lowmem_page_address(dst[i].page);
                unsigned long *src_data = lowmem_page_address(src[i].page);
                copy_page(dst_data, src_data);
        }
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
        unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

        BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
        return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                                 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
        unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

        if (unlikely(section >= ic->journal_sections) ||
            unlikely(offset >= limit)) {
                DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
                       function, section, offset, ic->journal_sections, limit);
                BUG();
        }
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                               unsigned *pl_index, unsigned *pl_offset)
{
        unsigned sector;

        access_journal_check(ic, section, offset, false, "page_list_location");

        sector = section * ic->journal_section_sectors + offset;

        *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

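/*
 * Worked example (added commentary): with 4096-byte pages
 * (PAGE_SHIFT - SECTOR_SHIFT == 3), journal sector 9 lives in page
 * 9 >> 3 == 1 at byte offset (9 << 9) & 4095 == 512.
 */
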
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
                                               unsigned section, unsigned offset, unsigned *n_sectors)
{
        unsigned pl_index, pl_offset;
        char *va;

        page_list_location(ic, section, offset, &pl_index, &pl_offset);

        if (n_sectors)
                *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

        va = lowmem_page_address(pl[pl_index].page);

        return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
        return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        unsigned rel_sector, offset;
        struct journal_sector *js;

        access_journal_check(ic, section, n, true, "access_journal_entry");

        rel_sector = n % JOURNAL_BLOCK_SECTORS;
        offset = n / JOURNAL_BLOCK_SECTORS;

        js = access_journal(ic, section, rel_sector);
        return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        n <<= ic->sb->log2_sectors_per_block;

        n += JOURNAL_BLOCK_SECTORS;

        access_journal_check(ic, section, n, false, "access_journal_data");

        return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
        SHASH_DESC_ON_STACK(desc, ic->journal_mac);
        int r;
        unsigned j, size;

        desc->tfm = ic->journal_mac;

        r = crypto_shash_init(desc);
        if (unlikely(r)) {
                dm_integrity_io_error(ic, "crypto_shash_init", r);
                goto err;
        }

        for (j = 0; j < ic->journal_section_entries; j++) {
                struct journal_entry *je = access_journal_entry(ic, section, j);
                r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
                if (unlikely(r)) {
                        dm_integrity_io_error(ic, "crypto_shash_update", r);
                        goto err;
                }
        }

        size = crypto_shash_digestsize(ic->journal_mac);

        if (likely(size <= JOURNAL_MAC_SIZE)) {
                r = crypto_shash_final(desc, result);
                if (unlikely(r)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memset(result + size, 0, JOURNAL_MAC_SIZE - size);
        } else {
                __u8 digest[HASH_MAX_DIGESTSIZE];

                if (WARN_ON(size > sizeof(digest))) {
                        dm_integrity_io_error(ic, "digest_size", -EINVAL);
                        goto err;
                }
                r = crypto_shash_final(desc, digest);
                if (unlikely(r)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memcpy(result, digest, JOURNAL_MAC_SIZE);
        }

        return;
err:
        memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
        __u8 result[JOURNAL_MAC_SIZE];
        unsigned j;

        if (!ic->journal_mac)
                return;

        section_mac(ic, section, result);

        for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
                struct journal_sector *js = access_journal(ic, section, j);

                if (likely(wr))
                        memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
                else {
                        if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
                                dm_integrity_io_error(ic, "journal mac", -EILSEQ);
                }
        }
}

static void complete_journal_op(void *context)
{
        struct journal_completion *comp = context;
        BUG_ON(!atomic_read(&comp->in_flight));
        if (likely(atomic_dec_and_test(&comp->in_flight)))
                complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                        unsigned n_sections, struct journal_completion *comp)
{
        struct async_submit_ctl submit;
        size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
        unsigned pl_index, pl_offset, section_index;
        struct page_list *source_pl, *target_pl;

        if (likely(encrypt)) {
                source_pl = ic->journal;
                target_pl = ic->journal_io;
        } else {
                source_pl = ic->journal_io;
                target_pl = ic->journal;
        }

        page_list_location(ic, section, 0, &pl_index, &pl_offset);

        atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

        section_index = pl_index;

        do {
                size_t this_step;
                struct page *src_pages[2];
                struct page *dst_page;

                while (unlikely(pl_index == section_index)) {
                        unsigned dummy;
                        if (likely(encrypt))
                                rw_section_mac(ic, section, true);
                        section++;
                        n_sections--;
                        if (!n_sections)
                                break;
                        page_list_location(ic, section, 0, &section_index, &dummy);
                }

                this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
                dst_page = target_pl[pl_index].page;
                src_pages[0] = source_pl[pl_index].page;
                src_pages[1] = ic->journal_xor[pl_index].page;

                async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

                pl_index++;
                pl_offset = 0;
                n_bytes -= this_step;
        } while (n_bytes);

        BUG_ON(n_sections);

        async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
        struct journal_completion *comp = req->data;
        if (unlikely(err)) {
                if (likely(err == -EINPROGRESS)) {
                        complete(&comp->ic->crypto_backoff);
                        return;
                }
                dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
        }
        complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
        int r;
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      complete_journal_encrypt, comp);
        if (likely(encrypt))
                r = crypto_skcipher_encrypt(req);
        else
                r = crypto_skcipher_decrypt(req);
        if (likely(!r))
                return false;
        if (likely(r == -EINPROGRESS))
                return true;
        if (likely(r == -EBUSY)) {
                wait_for_completion(&comp->ic->crypto_backoff);
                reinit_completion(&comp->ic->crypto_backoff);
                return true;
        }
        dm_integrity_io_error(comp->ic, "encrypt", r);
        return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                          unsigned n_sections, struct journal_completion *comp)
{
        struct scatterlist **source_sg;
        struct scatterlist **target_sg;

        atomic_add(2, &comp->in_flight);

        if (likely(encrypt)) {
                source_sg = ic->journal_scatterlist;
                target_sg = ic->journal_io_scatterlist;
        } else {
                source_sg = ic->journal_io_scatterlist;
                target_sg = ic->journal_scatterlist;
        }

        do {
                struct skcipher_request *req;
                unsigned ivsize;
                char *iv;

                if (likely(encrypt))
                        rw_section_mac(ic, section, true);

                req = ic->sk_requests[section];
                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
                iv = req->iv;

                memcpy(iv, iv + ivsize, ivsize);

                req->src = source_sg[section];
                req->dst = target_sg[section];

                if (unlikely(do_crypt(encrypt, req, comp)))
                        atomic_inc(&comp->in_flight);

                section++;
                n_sections--;
        } while (n_sections);

        atomic_dec(&comp->in_flight);
        complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                            unsigned n_sections, struct journal_completion *comp)
{
        if (ic->journal_xor)
                return xor_journal(ic, encrypt, section, n_sections, comp);
        else
                return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
        struct journal_completion *comp = context;
        if (unlikely(error != 0))
                dm_integrity_io_error(comp->ic, "writing journal", -EIO);
        complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
                               unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        unsigned pl_index, pl_offset;
        int r;

        if (unlikely(dm_integrity_failed(ic))) {
                if (comp)
                        complete_journal_io(-1UL, comp);
                return;
        }

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_PAGE_LIST;
        if (ic->journal_io)
                io_req.mem.ptr.pl = &ic->journal_io[pl_index];
        else
                io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        if (likely(comp != NULL)) {
                io_req.notify.fn = complete_journal_io;
                io_req.notify.context = comp;
        } else {
                io_req.notify.fn = NULL;
        }
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start + SB_SECTORS + sector;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
                if (comp) {
                        WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                        complete_journal_io(-1UL, comp);
                }
        }
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
                       unsigned n_sections, struct journal_completion *comp)
{
        unsigned sector, n_sectors;

        sector = section * ic->journal_section_sectors;
        n_sectors = n_sections * ic->journal_section_sectors;

        rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
        struct journal_completion io_comp;
        struct journal_completion crypt_comp_1;
        struct journal_completion crypt_comp_2;
        unsigned i;

        io_comp.ic = ic;
        init_completion(&io_comp.comp);

        if (commit_start + commit_sections <= ic->journal_sections) {
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
                        wait_for_completion_io(&crypt_comp_1.comp);
                } else {
                        for (i = 0; i < commit_sections; i++)
                                rw_section_mac(ic, commit_start + i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
                           commit_sections, &io_comp);
        } else {
                unsigned to_end;
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
                to_end = ic->journal_sections - commit_start;
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
                        if (try_wait_for_completion(&crypt_comp_1.comp)) {
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                reinit_completion(&crypt_comp_1.comp);
                                crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
                                wait_for_completion_io(&crypt_comp_1.comp);
                        } else {
                                crypt_comp_2.ic = ic;
                                init_completion(&crypt_comp_2.comp);
                                crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
                                wait_for_completion_io(&crypt_comp_1.comp);
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                wait_for_completion_io(&crypt_comp_2.comp);
                        }
                } else {
                        for (i = 0; i < to_end; i++)
                                rw_section_mac(ic, commit_start + i, true);
                        rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                        for (i = 0; i < commit_sections - to_end; i++)
                                rw_section_mac(ic, i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
        }

        wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                              unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        int r;
        unsigned sector, pl_index, pl_offset;

        BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

        if (unlikely(dm_integrity_failed(ic))) {
                fn(-1UL, data);
                return;
        }

        sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = REQ_OP_WRITE;
        io_req.bi_op_flags = 0;
        io_req.mem.type = DM_IO_PAGE_LIST;
        io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        io_req.notify.fn = fn;
        io_req.notify.context = data;
        io_req.client = ic->io;
        io_loc.bdev = ic->dev->bdev;
        io_loc.sector = target;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                fn(-1UL, data);
        }
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
        return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
               range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

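/*
 * Example (added commentary): each range is treated as the half-open
 * interval [logical_sector, logical_sector + n_sectors), so [0, 8) and
 * [8, 16) do not overlap, while [0, 9) and [8, 16) do.
 */
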
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
        struct rb_node **n = &ic->in_progress.rb_node;
        struct rb_node *parent;

        BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

        if (likely(check_waiting)) {
                struct dm_integrity_range *range;
                list_for_each_entry(range, &ic->wait_list, wait_entry) {
                        if (unlikely(ranges_overlap(range, new_range)))
                                return false;
                }
        }

        parent = NULL;

        while (*n) {
                struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

                parent = *n;
                if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
                        n = &range->node.rb_left;
                } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
                        n = &range->node.rb_right;
                } else {
                        return false;
                }
        }

        rb_link_node(&new_range->node, parent, n);
        rb_insert_color(&new_range->node, &ic->in_progress);

        return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        rb_erase(&range->node, &ic->in_progress);
        while (unlikely(!list_empty(&ic->wait_list))) {
                struct dm_integrity_range *last_range =
                        list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
                struct task_struct *last_range_task;
                last_range_task = last_range->task;
                list_del(&last_range->wait_entry);
                if (!add_new_range(ic, last_range, false)) {
                        last_range->task = last_range_task;
                        list_add(&last_range->wait_entry, &ic->wait_list);
                        break;
                }
                last_range->waiting = false;
                wake_up_process(last_range_task);
        }
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->endio_wait.lock, flags);
        remove_range_unlocked(ic, range);
        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        new_range->waiting = true;
        list_add_tail(&new_range->wait_entry, &ic->wait_list);
        new_range->task = current;
        do {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&ic->endio_wait.lock);
                io_schedule();
                spin_lock_irq(&ic->endio_wait.lock);
        } while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        if (unlikely(!add_new_range(ic, new_range, true)))
                wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
        RB_CLEAR_NODE(&node->node);
        node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
        struct rb_node **link;
        struct rb_node *parent;

        node->sector = sector;
        BUG_ON(!RB_EMPTY_NODE(&node->node));

        link = &ic->journal_tree_root.rb_node;
        parent = NULL;

        while (*link) {
                struct journal_node *j;
                parent = *link;
                j = container_of(parent, struct journal_node, node);
                if (sector < j->sector)
                        link = &j->node.rb_left;
                else
                        link = &j->node.rb_right;
        }

        rb_link_node(&node->node, parent, link);
        rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        BUG_ON(RB_EMPTY_NODE(&node->node));
        rb_erase(&node->node, &ic->journal_tree_root);
        init_journal_node(node);
}

#define NOT_FOUND       (-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
        struct rb_node *n = ic->journal_tree_root.rb_node;
        unsigned found = NOT_FOUND;
        *next_sector = (sector_t)-1;
        while (n) {
                struct journal_node *j = container_of(n, struct journal_node, node);
                if (sector == j->sector) {
                        found = j - ic->journal_tree;
                }
                if (sector < j->sector) {
                        *next_sector = j->sector;
                        n = j->node.rb_left;
                } else {
                        n = j->node.rb_right;
                }
        }

        return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
        struct journal_node *node, *next_node;
        struct rb_node *next;

        if (unlikely(pos >= ic->journal_entries))
                return false;
        node = &ic->journal_tree[pos];
        if (unlikely(RB_EMPTY_NODE(&node->node)))
                return false;
        if (unlikely(node->sector != sector))
                return false;

        next = rb_next(&node->node);
        if (unlikely(!next))
                return true;

        next_node = container_of(next, struct journal_node, node);
        return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        struct rb_node *next;
        struct journal_node *next_node;
        unsigned next_section;

        BUG_ON(RB_EMPTY_NODE(&node->node));

        next = rb_next(&node->node);
        if (unlikely(!next))
                return false;

        next_node = container_of(next, struct journal_node, node);

        if (next_node->sector != node->sector)
                return false;

        next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
        if (next_section >= ic->committed_section &&
            next_section < ic->committed_section + ic->n_committed_sections)
                return true;
        if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
                return true;

        return false;
}

#define TAG_READ        0
#define TAG_WRITE       1
#define TAG_CMP         2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
                               unsigned *metadata_offset, unsigned total_size, int op)
{
        do {
                unsigned char *data, *dp;
                struct dm_buffer *b;
                unsigned to_copy;
                int r;

                r = dm_integrity_failed(ic);
                if (unlikely(r))
                        return r;

                data = dm_bufio_read(ic->bufio, *metadata_block, &b);
                if (IS_ERR(data))
                        return PTR_ERR(data);

                to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
                dp = data + *metadata_offset;
                if (op == TAG_READ) {
                        memcpy(tag, dp, to_copy);
                } else if (op == TAG_WRITE) {
                        memcpy(dp, tag, to_copy);
                        dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
                } else {
                        /* e.g.: op == TAG_CMP */
                        if (unlikely(memcmp(dp, tag, to_copy))) {
                                unsigned i;

                                for (i = 0; i < to_copy; i++) {
                                        if (dp[i] != tag[i])
                                                break;
                                        total_size--;
                                }
                                dm_bufio_release(b);
                                return total_size;
                        }
                }
                dm_bufio_release(b);

                tag += to_copy;
                *metadata_offset += to_copy;
                if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
                        (*metadata_block)++;
                        *metadata_offset = 0;
                }
                total_size -= to_copy;
        } while (unlikely(total_size));

        return 0;
}

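/*
 * Hedged usage sketch (added commentary, not upstream code): reading back
 * the tag of one block; the metadata position comes from
 * get_metadata_sector_and_offset() and is advanced by the call itself.
 * example_read_one_tag() is a hypothetical wrapper.
 */
static inline int example_read_one_tag(struct dm_integrity_c *ic, __u8 *tag,
                                       sector_t *metadata_block,
                                       unsigned *metadata_offset)
{
        return dm_integrity_rw_tag(ic, tag, metadata_block, metadata_offset,
                                   ic->tag_size, TAG_READ);
}
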
1346static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
1347{
1348        int r;
1349        r = dm_bufio_write_dirty_buffers(ic->bufio);
1350        if (unlikely(r))
1351                dm_integrity_io_error(ic, "writing tags", r);
1352}
1353
1354static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1355{
1356        DECLARE_WAITQUEUE(wait, current);
1357        __add_wait_queue(&ic->endio_wait, &wait);
1358        __set_current_state(TASK_UNINTERRUPTIBLE);
1359        spin_unlock_irq(&ic->endio_wait.lock);
1360        io_schedule();
1361        spin_lock_irq(&ic->endio_wait.lock);
1362        __remove_wait_queue(&ic->endio_wait, &wait);
1363}
1364
1365static void autocommit_fn(struct timer_list *t)
1366{
1367        struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1368
1369        if (likely(!dm_integrity_failed(ic)))
1370                queue_work(ic->commit_wq, &ic->commit_work);
1371}
1372
1373static void schedule_autocommit(struct dm_integrity_c *ic)
1374{
1375        if (!timer_pending(&ic->autocommit_timer))
1376                mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1377}
1378
1379static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1380{
1381        struct bio *bio;
1382        unsigned long flags;
1383
1384        spin_lock_irqsave(&ic->endio_wait.lock, flags);
1385        bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1386        bio_list_add(&ic->flush_bio_list, bio);
1387        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1388
1389        queue_work(ic->commit_wq, &ic->commit_work);
1390}
1391
1392static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1393{
1394        int r = dm_integrity_failed(ic);
1395        if (unlikely(r) && !bio->bi_status)
1396                bio->bi_status = errno_to_blk_status(r);
1397        if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1398                unsigned long flags;
1399                spin_lock_irqsave(&ic->endio_wait.lock, flags);
1400                bio_list_add(&ic->synchronous_bios, bio);
1401                queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1402                spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1403                return;
1404        }
1405        bio_endio(bio);
1406}
1407
1408static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1409{
1410        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1411
1412        if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1413                submit_flush_bio(ic, dio);
1414        else
1415                do_endio(ic, bio);
1416}
1417
1418static void dec_in_flight(struct dm_integrity_io *dio)
1419{
1420        if (atomic_dec_and_test(&dio->in_flight)) {
1421                struct dm_integrity_c *ic = dio->ic;
1422                struct bio *bio;
1423
1424                remove_range(ic, &dio->range);
1425
1426                if (unlikely(dio->write))
1427                        schedule_autocommit(ic);
1428
1429                bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1430
1431                if (unlikely(dio->bi_status) && !bio->bi_status)
1432                        bio->bi_status = dio->bi_status;
1433                if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1434                        dio->range.logical_sector += dio->range.n_sectors;
1435                        bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1436                        INIT_WORK(&dio->work, integrity_bio_wait);
1437                        queue_work(ic->wait_wq, &dio->work);
1438                        return;
1439                }
1440                do_endio_flush(ic, dio);
1441        }
1442}
1443
1444static void integrity_end_io(struct bio *bio)
1445{
1446        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1447
1448        bio->bi_iter = dio->orig_bi_iter;
1449        bio->bi_disk = dio->orig_bi_disk;
1450        bio->bi_partno = dio->orig_bi_partno;
1451        if (dio->orig_bi_integrity) {
1452                bio->bi_integrity = dio->orig_bi_integrity;
1453                bio->bi_opf |= REQ_INTEGRITY;
1454        }
1455        bio->bi_end_io = dio->orig_bi_end_io;
1456
1457        if (dio->completion)
1458                complete(dio->completion);
1459
1460        dec_in_flight(dio);
1461}
1462
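    /*
     * Compute the integrity tag for one block.  The tag is the internal
     * hash of the little-endian 64-bit sector number followed by the block
     * data, zero-padded when the digest is shorter than ic->tag_size:
     *
     *	tag = H(le64(sector) || data[0 .. (sectors_per_block << 9) - 1])
     *
     * Illustrative example: with "internal_hash:crc32c" and the default
     * tag_size equal to the 4-byte digest, a 512-byte block yields a
     * 4-byte tag and no padding.  On a hash-API error the result is
     * filled with random bytes, so verification fails loudly rather than
     * silently passing.
     */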
1463static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1464                                      const char *data, char *result)
1465{
1466        __u64 sector_le = cpu_to_le64(sector);
1467        SHASH_DESC_ON_STACK(req, ic->internal_hash);
1468        int r;
1469        unsigned digest_size;
1470
1471        req->tfm = ic->internal_hash;
1472
1473        r = crypto_shash_init(req);
1474        if (unlikely(r < 0)) {
1475                dm_integrity_io_error(ic, "crypto_shash_init", r);
1476                goto failed;
1477        }
1478
1479        r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1480        if (unlikely(r < 0)) {
1481                dm_integrity_io_error(ic, "crypto_shash_update", r);
1482                goto failed;
1483        }
1484
1485        r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1486        if (unlikely(r < 0)) {
1487                dm_integrity_io_error(ic, "crypto_shash_update", r);
1488                goto failed;
1489        }
1490
1491        r = crypto_shash_final(req, result);
1492        if (unlikely(r < 0)) {
1493                dm_integrity_io_error(ic, "crypto_shash_final", r);
1494                goto failed;
1495        }
1496
1497        digest_size = crypto_shash_digestsize(ic->internal_hash);
1498        if (unlikely(digest_size < ic->tag_size))
1499                memset(result + digest_size, 0, ic->tag_size - digest_size);
1500
1501        return;
1502
1503failed:
1504        /* This shouldn't happen; the hash functions have no reason to fail. */
1505        get_random_bytes(result, ic->tag_size);
1506}
1507
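    /*
     * Work item handling the metadata (tag) side of one bio.  With an
     * internal hash, per-block checksums are computed and either compared
     * (reads, TAG_CMP) or stored (writes, TAG_WRITE).  The checksum
     * buffer normally covers one page worth of blocks; e.g. with 4K
     * pages, 512-byte blocks and 4-byte tags that is 8 * 4 = 32 bytes
     * (plus extra_space when the digest is longer than the tag).  If the
     * allocation fails, the on-stack buffer is used one block at a time.
     * Without an internal hash, tags are simply copied between the bio
     * integrity payload and the metadata area.
     */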
1508static void integrity_metadata(struct work_struct *w)
1509{
1510        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1511        struct dm_integrity_c *ic = dio->ic;
1512
1513        int r;
1514
1515        if (ic->internal_hash) {
1516                struct bvec_iter iter;
1517                struct bio_vec bv;
1518                unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1519                struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1520                char *checksums;
1521                unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1522                char checksums_onstack[HASH_MAX_DIGESTSIZE];
1523                unsigned sectors_to_process = dio->range.n_sectors;
1524                sector_t sector = dio->range.logical_sector;
1525
1526                if (unlikely(ic->mode == 'R'))
1527                        goto skip_io;
1528
1529                checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1530                                    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1531                if (!checksums) {
1532                        checksums = checksums_onstack;
1533                        if (WARN_ON(extra_space &&
1534                                    digest_size > sizeof(checksums_onstack))) {
1535                                r = -EINVAL;
1536                                goto error;
1537                        }
1538                }
1539
1540                __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
1541                        unsigned pos;
1542                        char *mem, *checksums_ptr;
1543
1544again:
1545                        mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
1546                        pos = 0;
1547                        checksums_ptr = checksums;
1548                        do {
1549                                integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1550                                checksums_ptr += ic->tag_size;
1551                                sectors_to_process -= ic->sectors_per_block;
1552                                pos += ic->sectors_per_block << SECTOR_SHIFT;
1553                                sector += ic->sectors_per_block;
1554                        } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1555                        kunmap_atomic(mem);
1556
1557                        r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1558                                                checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
1559                        if (unlikely(r)) {
1560                                if (r > 0) {
1561                                        DMERR_LIMIT("Checksum failed at sector 0x%llx",
1562                                                    (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1563                                        r = -EILSEQ;
1564                                        atomic64_inc(&ic->number_of_mismatches);
1565                                }
1566                                if (likely(checksums != checksums_onstack))
1567                                        kfree(checksums);
1568                                goto error;
1569                        }
1570
1571                        if (!sectors_to_process)
1572                                break;
1573
1574                        if (unlikely(pos < bv.bv_len)) {
1575                                bv.bv_offset += pos;
1576                                bv.bv_len -= pos;
1577                                goto again;
1578                        }
1579                }
1580
1581                if (likely(checksums != checksums_onstack))
1582                        kfree(checksums);
1583        } else {
1584                struct bio_integrity_payload *bip = dio->orig_bi_integrity;
1585
1586                if (bip) {
1587                        struct bio_vec biv;
1588                        struct bvec_iter iter;
1589                        unsigned data_to_process = dio->range.n_sectors;
1590                        sector_to_block(ic, data_to_process);
1591                        data_to_process *= ic->tag_size;
1592
1593                        bip_for_each_vec(biv, bip, iter) {
1594                                unsigned char *tag;
1595                                unsigned this_len;
1596
1597                                BUG_ON(PageHighMem(biv.bv_page));
1598                                tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1599                                this_len = min(biv.bv_len, data_to_process);
1600                                r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1601                                                        this_len, !dio->write ? TAG_READ : TAG_WRITE);
1602                                if (unlikely(r))
1603                                        goto error;
1604                                data_to_process -= this_len;
1605                                if (!data_to_process)
1606                                        break;
1607                        }
1608                }
1609        }
1610skip_io:
1611        dec_in_flight(dio);
1612        return;
1613error:
1614        dio->bi_status = errno_to_blk_status(r);
1615        dec_in_flight(dio);
1616}
1617
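    /*
     * Target map entry point.  A sketch of the alignment test used below,
     * assuming ic->sectors_per_block is a power of two:
     *
     *	(logical_sector | bio_sectors(bio)) & (sectors_per_block - 1)
     *
     * is non-zero iff the start or the length of the bio is not a
     * multiple of the block size, so a single test rejects both cases.
     * Flushes are handed to the commit worker, FUA writes become
     * write-then-flush, and bios outside the provided data area or with
     * a mismatched integrity payload size are killed.
     */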
1618static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1619{
1620        struct dm_integrity_c *ic = ti->private;
1621        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1622        struct bio_integrity_payload *bip;
1623
1624        sector_t area, offset;
1625
1626        dio->ic = ic;
1627        dio->bi_status = 0;
1628
1629        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1630                submit_flush_bio(ic, dio);
1631                return DM_MAPIO_SUBMITTED;
1632        }
1633
1634        dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1635        dio->write = bio_op(bio) == REQ_OP_WRITE;
1636        dio->fua = dio->write && bio->bi_opf & REQ_FUA;
1637        if (unlikely(dio->fua)) {
1638                /*
1639                 * Don't pass down the FUA flag because we have to flush
1640                 * the disk cache anyway.
1641                 */
1642                bio->bi_opf &= ~REQ_FUA;
1643        }
1644        if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1645                DMERR("Sector number too big: 0x%llx + 0x%x > 0x%llx",
1646                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
1647                      (unsigned long long)ic->provided_data_sectors);
1648                return DM_MAPIO_KILL;
1649        }
1650        if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1651                DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1652                      ic->sectors_per_block,
1653                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
1654                return DM_MAPIO_KILL;
1655        }
1656
1657        if (ic->sectors_per_block > 1) {
1658                struct bvec_iter iter;
1659                struct bio_vec bv;
1660                bio_for_each_segment(bv, bio, iter) {
1661                        if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1662                                DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1663                                        bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1664                                return DM_MAPIO_KILL;
1665                        }
1666                }
1667        }
1668
1669        bip = bio_integrity(bio);
1670        if (!ic->internal_hash) {
1671                if (bip) {
1672                        unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1673                        if (ic->log2_tag_size >= 0)
1674                                wanted_tag_size <<= ic->log2_tag_size;
1675                        else
1676                                wanted_tag_size *= ic->tag_size;
1677                        if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1678                                DMERR("Invalid integrity data size %u, expected %u",
1679                                      bip->bip_iter.bi_size, wanted_tag_size);
1680                                return DM_MAPIO_KILL;
1681                        }
1682                }
1683        } else {
1684                if (unlikely(bip != NULL)) {
1685                        DMERR("Unexpected integrity data when using internal hash");
1686                        return DM_MAPIO_KILL;
1687                }
1688        }
1689
1690        if (unlikely(ic->mode == 'R') && unlikely(dio->write))
1691                return DM_MAPIO_KILL;
1692
1693        get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1694        dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1695        bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1696
1697        dm_integrity_map_continue(dio, true);
1698        return DM_MAPIO_SUBMITTED;
1699}
1700
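    /*
     * Copy bio data to or from the in-memory journal.  Each 512-byte
     * journal data sector reuses its last 8 bytes for the commit id, so
     * the layout is roughly:
     *
     *	bytes 0..503    data (JOURNAL_SECTOR_DATA)
     *	bytes 504..511  commit_id
     *
     * and the displaced last bytes of every data sector are kept in
     * je->last_bytes[].  Writes save them with je->last_bytes[s] =
     * js[s].commit_id after the memcpy; reads reassemble the original
     * sector from both pieces.  Returns true when the bio is larger than
     * the allocated journal entries, in which case the caller must retake
     * the lock and continue with the remainder.
     */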
1701static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1702                                 unsigned journal_section, unsigned journal_entry)
1703{
1704        struct dm_integrity_c *ic = dio->ic;
1705        sector_t logical_sector;
1706        unsigned n_sectors;
1707
1708        logical_sector = dio->range.logical_sector;
1709        n_sectors = dio->range.n_sectors;
1710        do {
1711                struct bio_vec bv = bio_iovec(bio);
1712                char *mem;
1713
1714                if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1715                        bv.bv_len = n_sectors << SECTOR_SHIFT;
1716                n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1717                bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1718retry_kmap:
1719                mem = kmap_atomic(bv.bv_page);
1720                if (likely(dio->write))
1721                        flush_dcache_page(bv.bv_page);
1722
1723                do {
1724                        struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1725
1726                        if (unlikely(!dio->write)) {
1727                                struct journal_sector *js;
1728                                char *mem_ptr;
1729                                unsigned s;
1730
1731                                if (unlikely(journal_entry_is_inprogress(je))) {
1732                                        flush_dcache_page(bv.bv_page);
1733                                        kunmap_atomic(mem);
1734
1735                                        __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1736                                        goto retry_kmap;
1737                                }
1738                                smp_rmb();
1739                                BUG_ON(journal_entry_get_sector(je) != logical_sector);
1740                                js = access_journal_data(ic, journal_section, journal_entry);
1741                                mem_ptr = mem + bv.bv_offset;
1742                                s = 0;
1743                                do {
1744                                        memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1745                                        *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1746                                        js++;
1747                                        mem_ptr += 1 << SECTOR_SHIFT;
1748                                } while (++s < ic->sectors_per_block);
1749#ifdef INTERNAL_VERIFY
1750                                if (ic->internal_hash) {
1751                                        char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1752
1753                                        integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1754                                        if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1755                                                DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1756                                                            (unsigned long long)logical_sector);
1757                                        }
1758                                }
1759#endif
1760                        }
1761
1762                        if (!ic->internal_hash) {
1763                                struct bio_integrity_payload *bip = bio_integrity(bio);
1764                                unsigned tag_todo = ic->tag_size;
1765                                char *tag_ptr = journal_entry_tag(ic, je);
1766
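                                    /*
                                     * Note the bracing: this reads as
                                     * "if (bip) do { ... } while (...); else ..."
                                     * The else binds to the if, and on
                                     * writes zero-fills the journal tag
                                     * when the bio carries no integrity
                                     * payload.
                                     */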
1767                                if (bip) do {
1768                                        struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1769                                        unsigned tag_now = min(biv.bv_len, tag_todo);
1770                                        char *tag_addr;
1771                                        BUG_ON(PageHighMem(biv.bv_page));
1772                                        tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1773                                        if (likely(dio->write))
1774                                                memcpy(tag_ptr, tag_addr, tag_now);
1775                                        else
1776                                                memcpy(tag_addr, tag_ptr, tag_now);
1777                                        bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
1778                                        tag_ptr += tag_now;
1779                                        tag_todo -= tag_now;
1780                                } while (unlikely(tag_todo)); else {
1781                                        if (likely(dio->write))
1782                                                memset(tag_ptr, 0, tag_todo);
1783                                }
1784                        }
1785
1786                        if (likely(dio->write)) {
1787                                struct journal_sector *js;
1788                                unsigned s;
1789
1790                                js = access_journal_data(ic, journal_section, journal_entry);
1791                                memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1792
1793                                s = 0;
1794                                do {
1795                                        je->last_bytes[s] = js[s].commit_id;
1796                                } while (++s < ic->sectors_per_block);
1797
1798                                if (ic->internal_hash) {
1799                                        unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1800                                        if (unlikely(digest_size > ic->tag_size)) {
1801                                                char checksums_onstack[HASH_MAX_DIGESTSIZE];
1802                                                integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1803                                                memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1804                                        } else
1805                                                integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1806                                }
1807
1808                                journal_entry_set_sector(je, logical_sector);
1809                        }
1810                        logical_sector += ic->sectors_per_block;
1811
1812                        journal_entry++;
1813                        if (unlikely(journal_entry == ic->journal_section_entries)) {
1814                                journal_entry = 0;
1815                                journal_section++;
1816                                wraparound_section(ic, &journal_section);
1817                        }
1818
1819                        bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1820                } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1821
1822                if (unlikely(!dio->write))
1823                        flush_dcache_page(bv.bv_page);
1824                kunmap_atomic(mem);
1825        } while (n_sectors);
1826
1827        if (likely(dio->write)) {
1828                smp_mb();
1829                if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1830                        wake_up(&ic->copy_to_journal_wait);
1831                if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1832                        queue_work(ic->commit_wq, &ic->commit_work);
1833                } else {
1834                        schedule_autocommit(ic);
1835                }
1836        } else {
1837                remove_range(ic, &dio->range);
1838        }
1839
1840        if (unlikely(bio->bi_iter.bi_size)) {
1841                sector_t area, offset;
1842
1843                dio->range.logical_sector = logical_sector;
1844                get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1845                dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1846                return true;
1847        }
1848
1849        return false;
1850}
1851
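    /*
     * Main I/O state machine, entered from the map path (from_map) and
     * from the workqueues.  In journal mode ('J'), writes allocate a run
     * of free journal entries under endio_wait.lock and copy the data
     * into the journal; reads look the sector up in the journal tree and
     * are served from the journal when found.  Anything that might sleep
     * while called from the map routine is bounced to a workqueue
     * instead, because sleeping there could stall bios on
     * current->bio_list.
     */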
1852static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1853{
1854        struct dm_integrity_c *ic = dio->ic;
1855        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1856        unsigned journal_section, journal_entry;
1857        unsigned journal_read_pos;
1858        struct completion read_comp;
1859        bool need_sync_io = ic->internal_hash && !dio->write;
1860
1861        if (need_sync_io && from_map) {
1862                INIT_WORK(&dio->work, integrity_bio_wait);
1863                queue_work(ic->metadata_wq, &dio->work);
1864                return;
1865        }
1866
1867lock_retry:
1868        spin_lock_irq(&ic->endio_wait.lock);
1869retry:
1870        if (unlikely(dm_integrity_failed(ic))) {
1871                spin_unlock_irq(&ic->endio_wait.lock);
1872                do_endio(ic, bio);
1873                return;
1874        }
1875        dio->range.n_sectors = bio_sectors(bio);
1876        journal_read_pos = NOT_FOUND;
1877        if (likely(ic->mode == 'J')) {
1878                if (dio->write) {
1879                        unsigned next_entry, i, pos;
1880                        unsigned ws, we, range_sectors;
1881
1882                        dio->range.n_sectors = min(dio->range.n_sectors,
1883                                                   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
1884                        if (unlikely(!dio->range.n_sectors)) {
1885                                if (from_map)
1886                                        goto offload_to_thread;
1887                                sleep_on_endio_wait(ic);
1888                                goto retry;
1889                        }
1890                        range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1891                        ic->free_sectors -= range_sectors;
1892                        journal_section = ic->free_section;
1893                        journal_entry = ic->free_section_entry;
1894
1895                        next_entry = ic->free_section_entry + range_sectors;
1896                        ic->free_section_entry = next_entry % ic->journal_section_entries;
1897                        ic->free_section += next_entry / ic->journal_section_entries;
1898                        ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
1899                        wraparound_section(ic, &ic->free_section);
1900
1901                        pos = journal_section * ic->journal_section_entries + journal_entry;
1902                        ws = journal_section;
1903                        we = journal_entry;
1904                        i = 0;
1905                        do {
1906                                struct journal_entry *je;
1907
1908                                add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
1909                                pos++;
1910                                if (unlikely(pos >= ic->journal_entries))
1911                                        pos = 0;
1912
1913                                je = access_journal_entry(ic, ws, we);
1914                                BUG_ON(!journal_entry_is_unused(je));
1915                                journal_entry_set_inprogress(je);
1916                                we++;
1917                                if (unlikely(we == ic->journal_section_entries)) {
1918                                        we = 0;
1919                                        ws++;
1920                                        wraparound_section(ic, &ws);
1921                                }
1922                        } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
1923
1924                        spin_unlock_irq(&ic->endio_wait.lock);
1925                        goto journal_read_write;
1926                } else {
1927                        sector_t next_sector;
1928                        journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1929                        if (likely(journal_read_pos == NOT_FOUND)) {
1930                                if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
1931                                        dio->range.n_sectors = next_sector - dio->range.logical_sector;
1932                        } else {
1933                                unsigned i;
1934                                unsigned jp = journal_read_pos + 1;
1935                                for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
1936                                        if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
1937                                                break;
1938                                }
1939                                dio->range.n_sectors = i;
1940                        }
1941                }
1942        }
1943        if (unlikely(!add_new_range(ic, &dio->range, true))) {
1944                /*
1945                 * We must not sleep in the request routine because it could
1946                 * stall bios on current->bio_list.
1947                 * So, we offload the bio to a workqueue if we have to sleep.
1948                 */
1949                if (from_map) {
1950offload_to_thread:
1951                        spin_unlock_irq(&ic->endio_wait.lock);
1952                        INIT_WORK(&dio->work, integrity_bio_wait);
1953                        queue_work(ic->wait_wq, &dio->work);
1954                        return;
1955                }
1956                if (journal_read_pos != NOT_FOUND)
1957                        dio->range.n_sectors = ic->sectors_per_block;
1958                wait_and_add_new_range(ic, &dio->range);
1959                /*
1960                 * wait_and_add_new_range drops the spinlock, so the journal
1961                 * may have been changed arbitrarily. We need to recheck.
1962                 * To simplify the code, we restrict I/O size to just one block.
1963                 */
1964                if (journal_read_pos != NOT_FOUND) {
1965                        sector_t next_sector;
1966                        unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1967                        if (unlikely(new_pos != journal_read_pos)) {
1968                                remove_range_unlocked(ic, &dio->range);
1969                                goto retry;
1970                        }
1971                }
1972        }
1973        spin_unlock_irq(&ic->endio_wait.lock);
1974
1975        if (unlikely(journal_read_pos != NOT_FOUND)) {
1976                journal_section = journal_read_pos / ic->journal_section_entries;
1977                journal_entry = journal_read_pos % ic->journal_section_entries;
1978                goto journal_read_write;
1979        }
1980
1981        if (ic->mode == 'B' && dio->write) {
1982                if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
1983                                     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
1984                        struct bitmap_block_status *bbs;
1985
1986                        bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
1987                        spin_lock(&bbs->bio_queue_lock);
1988                        bio_list_add(&bbs->bio_queue, bio);
1989                        spin_unlock(&bbs->bio_queue_lock);
1990                        queue_work(ic->writer_wq, &bbs->work);
1991                        return;
1992                }
1993        }
1994
1995        dio->in_flight = (atomic_t)ATOMIC_INIT(2);
1996
1997        if (need_sync_io) {
1998                init_completion(&read_comp);
1999                dio->completion = &read_comp;
2000        } else
2001                dio->completion = NULL;
2002
2003        dio->orig_bi_iter = bio->bi_iter;
2004
2005        dio->orig_bi_disk = bio->bi_disk;
2006        dio->orig_bi_partno = bio->bi_partno;
2007        bio_set_dev(bio, ic->dev->bdev);
2008
2009        dio->orig_bi_integrity = bio_integrity(bio);
2010        bio->bi_integrity = NULL;
2011        bio->bi_opf &= ~REQ_INTEGRITY;
2012
2013        dio->orig_bi_end_io = bio->bi_end_io;
2014        bio->bi_end_io = integrity_end_io;
2015
2016        bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2017        generic_make_request(bio);
2018
2019        if (need_sync_io) {
2020                wait_for_completion_io(&read_comp);
2021                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2022                    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2023                        goto skip_check;
2024                if (ic->mode == 'B') {
2025                        if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2026                                             dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2027                                goto skip_check;
2028                }
2029
2030                if (likely(!bio->bi_status))
2031                        integrity_metadata(&dio->work);
2032                else
2033skip_check:
2034                        dec_in_flight(dio);
2035
2036        } else {
2037                INIT_WORK(&dio->work, integrity_metadata);
2038                queue_work(ic->metadata_wq, &dio->work);
2039        }
2040
2041        return;
2042
2043journal_read_write:
2044        if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2045                goto lock_retry;
2046
2047        do_endio_flush(ic, dio);
2048}
2049
2050
2051static void integrity_bio_wait(struct work_struct *w)
2052{
2053        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2054
2055        dm_integrity_map_continue(dio, false);
2056}
2057
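    /*
     * Pad out a partially filled journal section so that commits always
     * cover whole sections.  Note that free_sectors is counted in journal
     * entries (blocks), not 512-byte sectors, hence the invariant checked
     * by the WARN_ON:
     *
     *	journal_sections * journal_section_entries ==
     *		(n_uncommitted_sections + n_committed_sections) *
     *			journal_section_entries + free_sectors
     */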
2058static void pad_uncommitted(struct dm_integrity_c *ic)
2059{
2060        if (ic->free_section_entry) {
2061                ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2062                ic->free_section_entry = 0;
2063                ic->free_section++;
2064                wraparound_section(ic, &ic->free_section);
2065                ic->n_uncommitted_sections++;
2066        }
2067        if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2068                    (ic->n_uncommitted_sections + ic->n_committed_sections) *
2069                    ic->journal_section_entries + ic->free_sectors)) {
2070                DMCRIT("journal_sections %u, journal_section_entries %u, "
2071                       "n_uncommitted_sections %u, n_committed_sections %u, "
2072                       "free_sectors %u",
2073                       ic->journal_sections, ic->journal_section_entries,
2074                       ic->n_uncommitted_sections, ic->n_committed_sections,
2075                       ic->free_sectors);
2076        }
2077}
2078
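    /*
     * Commit work: wait until no entry in the sections being committed is
     * still having data copied into it, stamp every journal sector with
     * the current commit id, write the sections to the journal area and
     * only then complete any queued flush bios, so a flush is not
     * acknowledged before the data preceding it is stable in the on-disk
     * journal.
     */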
2079static void integrity_commit(struct work_struct *w)
2080{
2081        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2082        unsigned commit_start, commit_sections;
2083        unsigned i, j, n;
2084        struct bio *flushes;
2085
2086        del_timer(&ic->autocommit_timer);
2087
2088        spin_lock_irq(&ic->endio_wait.lock);
2089        flushes = bio_list_get(&ic->flush_bio_list);
2090        if (unlikely(ic->mode != 'J')) {
2091                spin_unlock_irq(&ic->endio_wait.lock);
2092                dm_integrity_flush_buffers(ic);
2093                goto release_flush_bios;
2094        }
2095
2096        pad_uncommitted(ic);
2097        commit_start = ic->uncommitted_section;
2098        commit_sections = ic->n_uncommitted_sections;
2099        spin_unlock_irq(&ic->endio_wait.lock);
2100
2101        if (!commit_sections)
2102                goto release_flush_bios;
2103
2104        i = commit_start;
2105        for (n = 0; n < commit_sections; n++) {
2106                for (j = 0; j < ic->journal_section_entries; j++) {
2107                        struct journal_entry *je;
2108                        je = access_journal_entry(ic, i, j);
2109                        io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2110                }
2111                for (j = 0; j < ic->journal_section_sectors; j++) {
2112                        struct journal_sector *js;
2113                        js = access_journal(ic, i, j);
2114                        js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2115                }
2116                i++;
2117                if (unlikely(i >= ic->journal_sections))
2118                        ic->commit_seq = next_commit_seq(ic->commit_seq);
2119                wraparound_section(ic, &i);
2120        }
2121        smp_rmb();
2122
2123        write_journal(ic, commit_start, commit_sections);
2124
2125        spin_lock_irq(&ic->endio_wait.lock);
2126        ic->uncommitted_section += commit_sections;
2127        wraparound_section(ic, &ic->uncommitted_section);
2128        ic->n_uncommitted_sections -= commit_sections;
2129        ic->n_committed_sections += commit_sections;
2130        spin_unlock_irq(&ic->endio_wait.lock);
2131
2132        if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2133                queue_work(ic->writer_wq, &ic->writer_work);
2134
2135release_flush_bios:
2136        while (flushes) {
2137                struct bio *next = flushes->bi_next;
2138                flushes->bi_next = NULL;
2139                do_endio(ic, flushes);
2140                flushes = next;
2141        }
2142}
2143
2144static void complete_copy_from_journal(unsigned long error, void *context)
2145{
2146        struct journal_io *io = context;
2147        struct journal_completion *comp = io->comp;
2148        struct dm_integrity_c *ic = comp->ic;
2149        remove_range(ic, &io->range);
2150        mempool_free(io, &ic->journal_io_mempool);
2151        if (unlikely(error != 0))
2152                dm_integrity_io_error(ic, "copying from journal", -EIO);
2153        complete_journal_op(comp);
2154}
2155
2156static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2157                               struct journal_entry *je)
2158{
2159        unsigned s = 0;
2160        do {
2161                js->commit_id = je->last_bytes[s];
2162                js++;
2163        } while (++s < ic->sectors_per_block);
2164}
2165
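    /*
     * Destage committed journal sections to their final location on the
     * data device.  For each run of entries [j, k) that is contiguous on
     * disk, the displaced last bytes are restored, entries superseded by
     * a newer committed copy of the same sector are dropped (non-replay
     * only), the tags are written to the metadata area and the data is
     * copied out with copy_from_journal().  During replay the section
     * MACs are also processed via rw_section_mac() and, with an internal
     * hash, each journal tag is cross-checked against the journal data.
     */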
2166static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2167                             unsigned write_sections, bool from_replay)
2168{
2169        unsigned i, j, n;
2170        struct journal_completion comp;
2171        struct blk_plug plug;
2172
2173        blk_start_plug(&plug);
2174
2175        comp.ic = ic;
2176        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2177        init_completion(&comp.comp);
2178
2179        i = write_start;
2180        for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2181#ifndef INTERNAL_VERIFY
2182                if (unlikely(from_replay))
2183#endif
2184                        rw_section_mac(ic, i, false);
2185                for (j = 0; j < ic->journal_section_entries; j++) {
2186                        struct journal_entry *je = access_journal_entry(ic, i, j);
2187                        sector_t sec, area, offset;
2188                        unsigned k, l, next_loop;
2189                        sector_t metadata_block;
2190                        unsigned metadata_offset;
2191                        struct journal_io *io;
2192
2193                        if (journal_entry_is_unused(je))
2194                                continue;
2195                        BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2196                        sec = journal_entry_get_sector(je);
2197                        if (unlikely(from_replay)) {
2198                                if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2199                                        dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2200                                        sec &= ~(sector_t)(ic->sectors_per_block - 1);
2201                                }
2202                        }
2203                        get_area_and_offset(ic, sec, &area, &offset);
2204                        restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2205                        for (k = j + 1; k < ic->journal_section_entries; k++) {
2206                                struct journal_entry *je2 = access_journal_entry(ic, i, k);
2207                                sector_t sec2, area2, offset2;
2208                                if (journal_entry_is_unused(je2))
2209                                        break;
2210                                BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2211                                sec2 = journal_entry_get_sector(je2);
2212                                get_area_and_offset(ic, sec2, &area2, &offset2);
2213                                if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2214                                        break;
2215                                restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2216                        }
2217                        next_loop = k - 1;
2218
2219                        io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2220                        io->comp = &comp;
2221                        io->range.logical_sector = sec;
2222                        io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2223
2224                        spin_lock_irq(&ic->endio_wait.lock);
2225                        add_new_range_and_wait(ic, &io->range);
2226
2227                        if (likely(!from_replay)) {
2228                                struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2229
2230                                /* don't write if there is a newer committed sector */
2231                                while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2232                                        struct journal_entry *je2 = access_journal_entry(ic, i, j);
2233
2234                                        journal_entry_set_unused(je2);
2235                                        remove_journal_node(ic, &section_node[j]);
2236                                        j++;
2237                                        sec += ic->sectors_per_block;
2238                                        offset += ic->sectors_per_block;
2239                                }
2240                                while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2241                                        struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2242
2243                                        journal_entry_set_unused(je2);
2244                                        remove_journal_node(ic, &section_node[k - 1]);
2245                                        k--;
2246                                }
2247                                if (j == k) {
2248                                        remove_range_unlocked(ic, &io->range);
2249                                        spin_unlock_irq(&ic->endio_wait.lock);
2250                                        mempool_free(io, &ic->journal_io_mempool);
2251                                        goto skip_io;
2252                                }
2253                                for (l = j; l < k; l++) {
2254                                        remove_journal_node(ic, &section_node[l]);
2255                                }
2256                        }
2257                        spin_unlock_irq(&ic->endio_wait.lock);
2258
2259                        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2260                        for (l = j; l < k; l++) {
2261                                int r;
2262                                struct journal_entry *je2 = access_journal_entry(ic, i, l);
2263
2264                                if (
2265#ifndef INTERNAL_VERIFY
2266                                    unlikely(from_replay) &&
2267#endif
2268                                    ic->internal_hash) {
2269                                        char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2270
2271                                        integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2272                                                                  (char *)access_journal_data(ic, i, l), test_tag);
2273                                        if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2274                                                dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2275                                }
2276
2277                                journal_entry_set_unused(je2);
2278                                r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2279                                                        ic->tag_size, TAG_WRITE);
2280                                if (unlikely(r)) {
2281                                        dm_integrity_io_error(ic, "writing tags", r);
2282                                }
2283                        }
2284
2285                        atomic_inc(&comp.in_flight);
2286                        copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2287                                          (k - j) << ic->sb->log2_sectors_per_block,
2288                                          get_data_sector(ic, area, offset),
2289                                          complete_copy_from_journal, io);
2290skip_io:
2291                        j = next_loop;
2292                }
2293        }
2294
2295        dm_bufio_write_dirty_buffers_async(ic->bufio);
2296
2297        blk_finish_plug(&plug);
2298
2299        complete_journal_op(&comp);
2300        wait_for_completion_io(&comp.comp);
2301
2302        dm_integrity_flush_buffers(ic);
2303}
2304
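    /*
     * Writer work: destage all currently committed sections, then return
     * their entries to the free pool.  Waking endio_wait only when
     * free_sectors was previously zero appears sufficient here, since
     * sleep_on_endio_wait() is entered only after a writer has seen no
     * free journal entries at all.
     */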
2305static void integrity_writer(struct work_struct *w)
2306{
2307        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2308        unsigned write_start, write_sections;
2309
2310        unsigned prev_free_sectors;
2311
2312        /* the following test is not strictly needed, but it exercises the journal replay code */
2313        if (READ_ONCE(ic->suspending) && !ic->meta_dev)
2314                return;
2315
2316        spin_lock_irq(&ic->endio_wait.lock);
2317        write_start = ic->committed_section;
2318        write_sections = ic->n_committed_sections;
2319        spin_unlock_irq(&ic->endio_wait.lock);
2320
2321        if (!write_sections)
2322                return;
2323
2324        do_journal_write(ic, write_start, write_sections, false);
2325
2326        spin_lock_irq(&ic->endio_wait.lock);
2327
2328        ic->committed_section += write_sections;
2329        wraparound_section(ic, &ic->committed_section);
2330        ic->n_committed_sections -= write_sections;
2331
2332        prev_free_sectors = ic->free_sectors;
2333        ic->free_sectors += write_sections * ic->journal_section_entries;
2334        if (unlikely(!prev_free_sectors))
2335                wake_up_locked(&ic->endio_wait);
2336
2337        spin_unlock_irq(&ic->endio_wait.lock);
2338}
2339
2340static void recalc_write_super(struct dm_integrity_c *ic)
2341{
2342        int r;
2343
2344        dm_integrity_flush_buffers(ic);
2345        if (dm_integrity_failed(ic))
2346                return;
2347
2348        r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2349        if (unlikely(r))
2350                dm_integrity_io_error(ic, "writing superblock", r);
2351}
2352
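    /*
     * Background recalculation: walk forward from sb->recalc_sector in
     * chunks of up to RECALC_SECTORS (8192 sectors, i.e. 4 MiB), read the
     * data, compute and write the tags, then record the new position.
     * The superblock is rewritten only every RECALC_WRITE_SUPER (16)
     * chunks, i.e. roughly once per 64 MiB, to limit superblock traffic;
     * a crash merely means re-hashing at most that much again.
     */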
2353static void integrity_recalc(struct work_struct *w)
2354{
2355        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2356        struct dm_integrity_range range;
2357        struct dm_io_request io_req;
2358        struct dm_io_region io_loc;
2359        sector_t area, offset;
2360        sector_t metadata_block;
2361        unsigned metadata_offset;
2362        sector_t logical_sector, n_sectors;
2363        __u8 *t;
2364        unsigned i;
2365        int r;
2366        unsigned super_counter = 0;
2367
2368        DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2369
2370        spin_lock_irq(&ic->endio_wait.lock);
2371
2372next_chunk:
2373
2374        if (unlikely(READ_ONCE(ic->suspending)))
2375                goto unlock_ret;
2376
2377        range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2378        if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2379                if (ic->mode == 'B') {
2380                        DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2381                        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2382                }
2383                goto unlock_ret;
2384        }
2385
2386        get_area_and_offset(ic, range.logical_sector, &area, &offset);
2387        range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2388        if (!ic->meta_dev)
2389                range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2390
2391        add_new_range_and_wait(ic, &range);
2392        spin_unlock_irq(&ic->endio_wait.lock);
2393        logical_sector = range.logical_sector;
2394        n_sectors = range.n_sectors;
2395
2396        if (ic->mode == 'B') {
2397                if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2398                        goto advance_and_next;
2399                }
2400                while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2401                                       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2402                        logical_sector += ic->sectors_per_block;
2403                        n_sectors -= ic->sectors_per_block;
2404                        cond_resched();
2405                }
2406                while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2407                                       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2408                        n_sectors -= ic->sectors_per_block;
2409                        cond_resched();
2410                }
2411                get_area_and_offset(ic, logical_sector, &area, &offset);
2412        }
2413
2414        DEBUG_print("recalculating: %llx, %llx\n", (unsigned long long)logical_sector, (unsigned long long)n_sectors);
2415
2416        if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2417                recalc_write_super(ic);
2418                if (ic->mode == 'B') {
2419                        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2420                }
2421                super_counter = 0;
2422        }
2423
2424        if (unlikely(dm_integrity_failed(ic)))
2425                goto err;
2426
2427        io_req.bi_op = REQ_OP_READ;
2428        io_req.bi_op_flags = 0;
2429        io_req.mem.type = DM_IO_VMA;
2430        io_req.mem.ptr.addr = ic->recalc_buffer;
2431        io_req.notify.fn = NULL;
2432        io_req.client = ic->io;
2433        io_loc.bdev = ic->dev->bdev;
2434        io_loc.sector = get_data_sector(ic, area, offset);
2435        io_loc.count = n_sectors;
2436
2437        r = dm_io(&io_req, 1, &io_loc, NULL);
2438        if (unlikely(r)) {
2439                dm_integrity_io_error(ic, "reading data", r);
2440                goto err;
2441        }
2442
2443        t = ic->recalc_tags;
2444        for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2445                integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2446                t += ic->tag_size;
2447        }
2448
2449        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2450
2451        r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2452        if (unlikely(r)) {
2453                dm_integrity_io_error(ic, "writing tags", r);
2454                goto err;
2455        }
2456
2457advance_and_next:
2458        cond_resched();
2459
2460        spin_lock_irq(&ic->endio_wait.lock);
2461        remove_range_unlocked(ic, &range);
2462        ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2463        goto next_chunk;
2464
2465err:
2466        remove_range(ic, &range);
2467        return;
2468
2469unlock_ret:
2470        spin_unlock_irq(&ic->endio_wait.lock);
2471
2472        recalc_write_super(ic);
2473}
2474
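    /*
     * Bitmap mode ('B') write path for bios whose may_write_bitmap bits
     * are not yet all set: the bits are first set in the in-memory
     * journal bitmap, the affected 4K bitmap block is written out with
     * REQ_FUA, and only then is may_write_bitmap updated and the bio
     * resubmitted, so the dirty bits reach the disk before the data they
     * cover.
     */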
2475static void bitmap_block_work(struct work_struct *w)
2476{
2477        struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2478        struct dm_integrity_c *ic = bbs->ic;
2479        struct bio *bio;
2480        struct bio_list bio_queue;
2481        struct bio_list waiting;
2482
2483        bio_list_init(&waiting);
2484
2485        spin_lock(&bbs->bio_queue_lock);
2486        bio_queue = bbs->bio_queue;
2487        bio_list_init(&bbs->bio_queue);
2488        spin_unlock(&bbs->bio_queue_lock);
2489
2490        while ((bio = bio_list_pop(&bio_queue))) {
2491                struct dm_integrity_io *dio;
2492
2493                dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2494
2495                if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2496                                    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2497                        remove_range(ic, &dio->range);
2498                        INIT_WORK(&dio->work, integrity_bio_wait);
2499                        queue_work(ic->wait_wq, &dio->work);
2500                } else {
2501                        block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2502                                        dio->range.n_sectors, BITMAP_OP_SET);
2503                        bio_list_add(&waiting, bio);
2504                }
2505        }
2506
2507        if (bio_list_empty(&waiting))
2508                return;
2509
2510        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2511                           bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2512                           BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2513
2514        while ((bio = bio_list_pop(&waiting))) {
2515                struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2516
2517                block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2518                                dio->range.n_sectors, BITMAP_OP_SET);
2519
2520                remove_range(ic, &dio->range);
2521                INIT_WORK(&dio->work, integrity_bio_wait);
2522                queue_work(ic->wait_wq, &dio->work);
2523        }
2524
2525        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2526}
2527
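    /*
     * Periodic bitmap flush: once all outstanding data is flushed, the
     * dirty bits are cleared in both bitmaps (all of them, or only up to
     * the recalculation boundary rounded down to a whole bitmap bit while
     * recalculation is in progress) and the bitmap blocks are rewritten
     * with REQ_FUA.  Synchronous-mode bios parked by do_endio() are
     * completed here as well.
     */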
2528static void bitmap_flush_work(struct work_struct *work)
2529{
2530        struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2531        struct dm_integrity_range range;
2532        unsigned long limit;
2533        struct bio *bio;
2534
2535        dm_integrity_flush_buffers(ic);
2536
2537        range.logical_sector = 0;
2538        range.n_sectors = ic->provided_data_sectors;
2539
2540        spin_lock_irq(&ic->endio_wait.lock);
2541        add_new_range_and_wait(ic, &range);
2542        spin_unlock_irq(&ic->endio_wait.lock);
2543
2544        dm_integrity_flush_buffers(ic);
2545        if (ic->meta_dev)
2546                blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
2547
2548        limit = ic->provided_data_sectors;
2549        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2550                limit = le64_to_cpu(ic->sb->recalc_sector)
2551                        >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2552                        << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2553        }
2554        /*DEBUG_print("zeroing journal\n");*/
2555        block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2556        block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2557
2558        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2559                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2560
2561        spin_lock_irq(&ic->endio_wait.lock);
2562        remove_range_unlocked(ic, &range);
2563        while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2564                bio_endio(bio);
2565                spin_unlock_irq(&ic->endio_wait.lock);
2566                spin_lock_irq(&ic->endio_wait.lock);
2567        }
2568        spin_unlock_irq(&ic->endio_wait.lock);
2569}
2570
2571
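    /*
     * (Re)initialize a range of journal sections: zero the data area of
     * every journal sector, stamp it with the commit id for commit_seq,
     * mark every journal entry unused and write the sections out.
     */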
2572static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2573                         unsigned n_sections, unsigned char commit_seq)
2574{
2575        unsigned i, j, n;
2576
2577        if (!n_sections)
2578                return;
2579
2580        for (n = 0; n < n_sections; n++) {
2581                i = start_section + n;
2582                wraparound_section(ic, &i);
2583                for (j = 0; j < ic->journal_section_sectors; j++) {
2584                        struct journal_sector *js = access_journal(ic, i, j);
2585                        memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2586                        js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2587                }
2588                for (j = 0; j < ic->journal_section_entries; j++) {
2589                        struct journal_entry *je = access_journal_entry(ic, i, j);
2590                        journal_entry_set_unused(je);
2591                }
2592        }
2593
2594        write_journal(ic, start_section, n_sections);
2595}
2596
2597static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2598{
2599        unsigned char k;
2600        for (k = 0; k < N_COMMIT_IDS; k++) {
2601                if (dm_integrity_commit_id(ic, i, j, k) == id)
2602                        return k;
2603        }
2604        dm_integrity_io_error(ic, "journal commit id", -EIO);
2605        return -EIO;
2606}
2607
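    /*
     * Journal replay on activation.  Commit ids cycle through
     * N_COMMIT_IDS values as the journal wraps, so recording which ids
     * occur (and the last section where each occurs) identifies the most
     * recent fully committed generation; a section whose commit id does
     * not match the expected sequence marks where a crash interrupted a
     * journal write, and replay stops there.
     */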
2608static void replay_journal(struct dm_integrity_c *ic)
2609{
2610        unsigned i, j;
2611        bool used_commit_ids[N_COMMIT_IDS];
2612        unsigned max_commit_id_sections[N_COMMIT_IDS];
2613        unsigned write_start, write_sections;
2614        unsigned continue_section;
2615        bool journal_empty;
2616        unsigned char unused, last_used, want_commit_seq;
2617
2618        if (ic->mode == 'R')
2619                return;
2620
2621        if (ic->journal_uptodate)
2622                return;
2623
2624        last_used = 0;
2625        write_start = 0;
2626
2627        if (!ic->just_formatted) {
2628                DEBUG_print("reading journal\n");
2629                rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2630                if (ic->journal_io)
2631                        DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2632                if (ic->journal_io) {
2633                        struct journal_completion crypt_comp;
2634                        crypt_comp.ic = ic;
2635                        init_completion(&crypt_comp.comp);
2636                        crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2637                        encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2638                        wait_for_completion(&crypt_comp.comp);
2639                }
2640                DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2641        }
2642
2643        if (dm_integrity_failed(ic))
2644                goto clear_journal;
2645
2646        journal_empty = true;
2647        memset(used_commit_ids, 0, sizeof used_commit_ids);
2648        memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2649        for (i = 0; i < ic->journal_sections; i++) {
2650                for (j = 0; j < ic->journal_section_sectors; j++) {
2651                        int k;
2652                        struct journal_sector *js = access_journal(ic, i, j);
2653                        k = find_commit_seq(ic, i, j, js->commit_id);
2654                        if (k < 0)
2655                                goto clear_journal;
2656                        used_commit_ids[k] = true;
2657                        max_commit_id_sections[k] = i;
2658                }
2659                if (journal_empty) {
2660                        for (j = 0; j < ic->journal_section_entries; j++) {
2661                                struct journal_entry *je = access_journal_entry(ic, i, j);
2662                                if (!journal_entry_is_unused(je)) {
2663                                        journal_empty = false;
2664                                        break;
2665                                }
2666                        }
2667                }
2668        }
2669
2670        if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2671                unused = N_COMMIT_IDS - 1;
2672                while (unused && !used_commit_ids[unused - 1])
2673                        unused--;
2674        } else {
2675                for (unused = 0; unused < N_COMMIT_IDS; unused++)
2676                        if (!used_commit_ids[unused])
2677                                break;
2678                if (unused == N_COMMIT_IDS) {
2679                        dm_integrity_io_error(ic, "journal commit ids", -EIO);
2680                        goto clear_journal;
2681                }
2682        }
2683        DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2684                    unused, used_commit_ids[0], used_commit_ids[1],
2685                    used_commit_ids[2], used_commit_ids[3]);
2686
2687        last_used = prev_commit_seq(unused);
2688        want_commit_seq = prev_commit_seq(last_used);
2689
2690        if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2691                journal_empty = true;
2692
2693        write_start = max_commit_id_sections[last_used] + 1;
2694        if (unlikely(write_start >= ic->journal_sections))
2695                want_commit_seq = next_commit_seq(want_commit_seq);
2696        wraparound_section(ic, &write_start);
2697
2698        i = write_start;
2699        for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2700                for (j = 0; j < ic->journal_section_sectors; j++) {
2701                        struct journal_sector *js = access_journal(ic, i, j);
2702
2703                        if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2704                                /*
2705                                 * This could be caused by a crash during writing.
2706                                 * We won't replay the inconsistent part of the
2707                                 * journal.
2708                                 */
2709                                DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2710                                            i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2711                                goto brk;
2712                        }
2713                }
2714                i++;
2715                if (unlikely(i >= ic->journal_sections))
2716                        want_commit_seq = next_commit_seq(want_commit_seq);
2717                wraparound_section(ic, &i);
2718        }
2719brk:
2720
2721        if (!journal_empty) {
2722                DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2723                            write_sections, write_start, want_commit_seq);
2724                do_journal_write(ic, write_start, write_sections, true);
2725        }
2726
2727        if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2728                continue_section = write_start;
2729                ic->commit_seq = want_commit_seq;
2730                DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2731        } else {
2732                unsigned s;
2733                unsigned char erase_seq;
2734clear_journal:
2735                DEBUG_print("clearing journal\n");
2736
2737                erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2738                s = write_start;
2739                init_journal(ic, s, 1, erase_seq);
2740                s++;
2741                wraparound_section(ic, &s);
2742                if (ic->journal_sections >= 2) {
2743                        init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2744                        s += ic->journal_sections - 2;
2745                        wraparound_section(ic, &s);
2746                        init_journal(ic, s, 1, erase_seq);
2747                }
2748
2749                continue_section = 0;
2750                ic->commit_seq = next_commit_seq(erase_seq);
2751        }
2752
2753        ic->committed_section = continue_section;
2754        ic->n_committed_sections = 0;
2755
2756        ic->uncommitted_section = continue_section;
2757        ic->n_uncommitted_sections = 0;
2758
2759        ic->free_section = continue_section;
2760        ic->free_section_entry = 0;
2761        ic->free_sectors = ic->journal_entries;
2762
2763        ic->journal_tree_root = RB_ROOT;
2764        for (i = 0; i < ic->journal_entries; i++)
2765                init_journal_node(&ic->journal_tree[i]);
2766}
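/*
 * Worked example of the commit-seq scan in replay_journal(), assuming
 * N_COMMIT_IDS == 4: if a crash left used_commit_ids = {1, 1, 0, 0},
 * the first unused sequence is 2, so last_used = 1 and
 * want_commit_seq = 0.  Replay then starts one section past
 * max_commit_id_sections[1] and stops at the first journal sector whose
 * commit_id no longer matches the expected sequence (the "brk" exit),
 * which marks the point where a crash interrupted the journal write.
 */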
2767
2768static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
2769{
2770        DEBUG_print("dm_integrity_enter_synchronous_mode\n");
2771
2772        if (ic->mode == 'B') {
2773                ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2774                ic->synchronous_mode = 1;
2775
2776                cancel_delayed_work_sync(&ic->bitmap_flush_work);
2777                queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2778                flush_workqueue(ic->commit_wq);
2779        }
2780}
2781
2782static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
2783{
2784        struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2785
2786        DEBUG_print("dm_integrity_reboot\n");
2787
2788        dm_integrity_enter_synchronous_mode(ic);
2789
2790        return NOTIFY_DONE;
2791}
2792
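/*
 * Note on ordering in dm_integrity_postsuspend() below: the autocommit
 * timer is stopped first, then the recalculation, bitmap, commit and
 * writer queues are drained in dependency order, and the buffers are
 * flushed last, so no stage can re-queue work behind a completed drain.
 */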
2793static void dm_integrity_postsuspend(struct dm_target *ti)
2794{
2795        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2796        int r;
2797
2798        WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
2799
2800        del_timer_sync(&ic->autocommit_timer);
2801
2802        WRITE_ONCE(ic->suspending, 1);
2803
2804        if (ic->recalc_wq)
2805                drain_workqueue(ic->recalc_wq);
2806
2807        if (ic->mode == 'B')
2808                cancel_delayed_work_sync(&ic->bitmap_flush_work);
2809
2810        queue_work(ic->commit_wq, &ic->commit_work);
2811        drain_workqueue(ic->commit_wq);
2812
2813        if (ic->mode == 'J') {
2814                if (ic->meta_dev)
2815                        queue_work(ic->writer_wq, &ic->writer_work);
2816                drain_workqueue(ic->writer_wq);
2817                dm_integrity_flush_buffers(ic);
2818        }
2819
2820        if (ic->mode == 'B') {
2821                dm_integrity_flush_buffers(ic);
2822#if 1
2823                /* set to 0 to test bitmap replay code */
2824                init_journal(ic, 0, ic->journal_sections, 0);
2825                ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2826                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2827                if (unlikely(r))
2828                        dm_integrity_io_error(ic, "writing superblock", r);
2829#endif
2830        }
2831
2832        WRITE_ONCE(ic->suspending, 0);
2833
2834        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2835
2836        ic->journal_uptodate = true;
2837}
2838
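/*
 * dm_integrity_resume() below distinguishes a clean start from recovery
 * after a crash in bitmap mode: if SB_FLAG_DIRTY_BITMAP is still set, the
 * on-disk bitmap is reloaded (or, when the bitmap granularity changed,
 * conservatively set to all-ones) and a full recalculation from sector 0
 * is scheduled.
 */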
2839static void dm_integrity_resume(struct dm_target *ti)
2840{
2841        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2842        int r;
2843        DEBUG_print("resume\n");
2844
2845        if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
2846                DEBUG_print("resume dirty_bitmap\n");
2847                rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
2848                                   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2849                if (ic->mode == 'B') {
2850                        if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
2851                                block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
2852                                block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
2853                                if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
2854                                                     BITMAP_OP_TEST_ALL_CLEAR)) {
2855                                        ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2856                                        ic->sb->recalc_sector = cpu_to_le64(0);
2857                                }
2858                        } else {
2859                                DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
2860                                            ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
2861                                ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
2862                                block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2863                                block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2864                                block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
2865                                rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2866                                                   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2867                                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2868                                ic->sb->recalc_sector = cpu_to_le64(0);
2869                        }
2870                } else {
2871                        if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
2872                              block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
2873                                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2874                                ic->sb->recalc_sector = cpu_to_le64(0);
2875                        }
2876                        init_journal(ic, 0, ic->journal_sections, 0);
2877                        replay_journal(ic);
2878                        ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2879                }
2880                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2881                if (unlikely(r))
2882                        dm_integrity_io_error(ic, "writing superblock", r);
2883        } else {
2884                replay_journal(ic);
2885                if (ic->mode == 'B') {
2886                        int mode;
2887                        ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2888                        ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
2889                        r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2890                        if (unlikely(r))
2891                                dm_integrity_io_error(ic, "writing superblock", r);
2892
2893                        mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
2894                        block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
2895                        block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
2896                        block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
2897                        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2898                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2899                }
2900        }
2901
2902        DEBUG_print("testing recalc: %x\n", ic->sb->flags);
2903        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2904                __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
2905                DEBUG_print("recalc pos: %llx / %llx\n", (unsigned long long)recalc_pos, (unsigned long long)ic->provided_data_sectors);
2906                if (recalc_pos < ic->provided_data_sectors) {
2907                        queue_work(ic->recalc_wq, &ic->recalc_work);
2908                } else if (recalc_pos > ic->provided_data_sectors) {
2909                        ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
2910                        recalc_write_super(ic);
2911                }
2912        }
2913
2914        ic->reboot_notifier.notifier_call = dm_integrity_reboot;
2915        ic->reboot_notifier.next = NULL;
2916        ic->reboot_notifier.priority = INT_MAX - 1;     /* be notified after md and before hardware drivers */
2917        WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
2918
2919#if 0
2920        /* set to 1 to stress test synchronous mode */
2921        dm_integrity_enter_synchronous_mode(ic);
2922#endif
2923}
2924
2925static void dm_integrity_status(struct dm_target *ti, status_type_t type,
2926                                unsigned status_flags, char *result, unsigned maxlen)
2927{
2928        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2929        unsigned arg_count;
2930        size_t sz = 0;
2931
2932        switch (type) {
2933        case STATUSTYPE_INFO:
2934                DMEMIT("%llu %llu",
2935                        (unsigned long long)atomic64_read(&ic->number_of_mismatches),
2936                        (unsigned long long)ic->provided_data_sectors);
2937                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
2938                        DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
2939                else
2940                        DMEMIT(" -");
2941                break;
2942
2943        case STATUSTYPE_TABLE: {
2944                __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
2945                watermark_percentage += ic->journal_entries / 2;
2946                do_div(watermark_percentage, ic->journal_entries);
2947                arg_count = 3;
2948                arg_count += !!ic->meta_dev;
2949                arg_count += ic->sectors_per_block != 1;
2950                arg_count += ic->recalculate_flag;
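                    /* 3 fixed args (journal/interleave/buffer sectors); modes 'J' and 'B' each emit two feature args below */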
2951                arg_count += ic->mode == 'J';
2952                arg_count += ic->mode == 'J';
2953                arg_count += ic->mode == 'B';
2954                arg_count += ic->mode == 'B';
2955                arg_count += !!ic->internal_hash_alg.alg_string;
2956                arg_count += !!ic->journal_crypt_alg.alg_string;
2957                arg_count += !!ic->journal_mac_alg.alg_string;
2958                DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
2959                       ic->tag_size, ic->mode, arg_count);
2960                if (ic->meta_dev)
2961                        DMEMIT(" meta_device:%s", ic->meta_dev->name);
2962                if (ic->sectors_per_block != 1)
2963                        DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
2964                if (ic->recalculate_flag)
2965                        DMEMIT(" recalculate");
2966                DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
2967                DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
2968                DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
2969                if (ic->mode == 'J') {
2970                        DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
2971                        DMEMIT(" commit_time:%u", ic->autocommit_msec);
2972                }
2973                if (ic->mode == 'B') {
2974                        DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
2975                        DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
2976                }
2977
2978#define EMIT_ALG(a, n)                                                  \
2979                do {                                                    \
2980                        if (ic->a.alg_string) {                         \
2981                                DMEMIT(" %s:%s", n, ic->a.alg_string);  \
2982                                if (ic->a.key_string)                   \
2983                                        DMEMIT(":%s", ic->a.key_string);\
2984                        }                                               \
2985                } while (0)
2986                EMIT_ALG(internal_hash_alg, "internal_hash");
2987                EMIT_ALG(journal_crypt_alg, "journal_crypt");
2988                EMIT_ALG(journal_mac_alg, "journal_mac");
2989                break;
2990        }
2991        }
2992}
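/*
 * For reference, a STATUSTYPE_TABLE line emitted above might look like
 * this (hypothetical device and sizes, mode 'J' with an internal hash):
 *
 *      /dev/sdb 0 4 J 6 journal_sectors:8192 interleave_sectors:32768
 *      buffer_sectors:128 journal_watermark:50 commit_time:10000
 *      internal_hash:crc32c
 *
 * i.e. exactly the constructor arguments dm_integrity_ctr() accepts.
 */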
2993
2994static int dm_integrity_iterate_devices(struct dm_target *ti,
2995                                        iterate_devices_callout_fn fn, void *data)
2996{
2997        struct dm_integrity_c *ic = ti->private;
2998
2999        if (!ic->meta_dev)
3000                return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3001        else
3002                return fn(ti, ic->dev, 0, ti->len, data);
3003}
3004
3005static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3006{
3007        struct dm_integrity_c *ic = ti->private;
3008
3009        if (ic->sectors_per_block > 1) {
3010                limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3011                limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3012                blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3013        }
3014}
3015
3016static void calculate_journal_section_size(struct dm_integrity_c *ic)
3017{
3018        unsigned sector_space = JOURNAL_SECTOR_DATA;
3019
3020        ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3021        ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3022                                         JOURNAL_ENTRY_ROUNDUP);
3023
3024        if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3025                sector_space -= JOURNAL_MAC_PER_SECTOR;
3026        ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3027        ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3028        ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3029        ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3030}
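/*
 * Worked example of the sizing above, assuming 512-byte sectors,
 * JOURNAL_SECTOR_DATA == 504 and JOURNAL_BLOCK_SECTORS == 8: with
 * tag_size == 4, sectors_per_block == 1 and no journal MAC,
 * journal_entry_size = roundup(16 + 4, 8) = 24 bytes, so
 * journal_entries_per_sector = 504 / 24 = 21,
 * journal_section_entries = 21 * 8 = 168 and
 * journal_section_sectors = (168 << 0) + 8 = 176.
 */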
3031
3032static int calculate_device_limits(struct dm_integrity_c *ic)
3033{
3034        __u64 initial_sectors;
3035
3036        calculate_journal_section_size(ic);
3037        initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3038        if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3039                return -EINVAL;
3040        ic->initial_sectors = initial_sectors;
3041
3042        if (!ic->meta_dev) {
3043                sector_t last_sector, last_area, last_offset;
3044
3045                ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3046                                           (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
3047                if (!(ic->metadata_run & (ic->metadata_run - 1)))
3048                        ic->log2_metadata_run = __ffs(ic->metadata_run);
3049                else
3050                        ic->log2_metadata_run = -1;
3051
3052                get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3053                last_sector = get_data_sector(ic, last_area, last_offset);
3054                if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3055                        return -EINVAL;
3056        } else {
3057                __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3058                meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3059                                >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3060                meta_size <<= ic->log2_buffer_sectors;
3061                if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3062                    ic->initial_sectors + meta_size > ic->meta_device_sectors)
3063                        return -EINVAL;
3064                ic->metadata_run = 1;
3065                ic->log2_metadata_run = 0;
3066        }
3067
3068        return 0;
3069}
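/*
 * Worked example for the interleaved (no meta_dev) branch above,
 * assuming METADATA_PADDING_SECTORS == 8: with tag_size == 4,
 * log2_interleave_sectors == 15 and log2_sectors_per_block == 0, the
 * tag area per data area is 4 << 15 = 128 KiB, already a multiple of
 * the (512 << 8) = 128 KiB padding unit, so metadata_run = 256 sectors
 * and, being a power of two, log2_metadata_run = 8.
 */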
3070
3071static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3072{
3073        unsigned journal_sections;
3074        int test_bit;
3075
3076        memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3077        memcpy(ic->sb->magic, SB_MAGIC, 8);
3078        ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3079        ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3080        if (ic->journal_mac_alg.alg_string)
3081                ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3082
3083        calculate_journal_section_size(ic);
3084        journal_sections = journal_sectors / ic->journal_section_sectors;
3085        if (!journal_sections)
3086                journal_sections = 1;
3087
3088        if (!ic->meta_dev) {
3089                ic->sb->journal_sections = cpu_to_le32(journal_sections);
3090                if (!interleave_sectors)
3091                        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3092                ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3093                ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3094                ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3095
3096                ic->provided_data_sectors = 0;
3097                for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3098                        __u64 prev_data_sectors = ic->provided_data_sectors;
3099
3100                        ic->provided_data_sectors |= (sector_t)1 << test_bit;
3101                        if (calculate_device_limits(ic))
3102                                ic->provided_data_sectors = prev_data_sectors;
3103                }
3104                if (!ic->provided_data_sectors)
3105                        return -EINVAL;
3106        } else {
3107                ic->sb->log2_interleave_sectors = 0;
3108                ic->provided_data_sectors = ic->data_device_sectors;
3109                ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3110
3111try_smaller_buffer:
3112                ic->sb->journal_sections = cpu_to_le32(0);
3113                for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3114                        __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3115                        __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3116                        if (test_journal_sections > journal_sections)
3117                                continue;
3118                        ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3119                        if (calculate_device_limits(ic))
3120                                ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3122                }
3123                if (!le32_to_cpu(ic->sb->journal_sections)) {
3124                        if (ic->log2_buffer_sectors > 3) {
3125                                ic->log2_buffer_sectors--;
3126                                goto try_smaller_buffer;
3127                        }
3128                        return -EINVAL;
3129                }
3130        }
3131
3132        ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3133
3134        sb_set_version(ic);
3135
3136        return 0;
3137}
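/*
 * Both sizing loops in initialize_superblock() use the same
 * bit-at-a-time maximization idiom: propose each bit from the highest
 * down and keep it only if calculate_device_limits() still succeeds.
 * A minimal sketch of the idiom (hypothetical fits() predicate):
 */
#if 0
static __u64 max_value_that_fits(__u64 limit, bool (*fits)(__u64))
{
        __u64 v = 0;
        int bit;

        for (bit = fls64(limit) - 1; bit >= 0; bit--) {
                __u64 try = v | ((__u64)1 << bit);      /* tentatively set this bit */

                if (fits(try))
                        v = try;                        /* keep it only if still valid */
        }
        return v;
}
#endif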
3138
3139static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3140{
3141        struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3142        struct blk_integrity bi;
3143
3144        memset(&bi, 0, sizeof(bi));
3145        bi.profile = &dm_integrity_profile;
3146        bi.tuple_size = ic->tag_size;
3147        bi.tag_size = bi.tuple_size;
3148        bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3149
3150        blk_integrity_register(disk, &bi);
3151        blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3152}
3153
3154static void dm_integrity_free_page_list(struct page_list *pl)
3155{
3156        unsigned i;
3157
3158        if (!pl)
3159                return;
3160        for (i = 0; pl[i].page; i++)
3161                __free_page(pl[i].page);
3162        kvfree(pl);
3163}
3164
3165static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3166{
3167        struct page_list *pl;
3168        unsigned i;
3169
3170        pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3171        if (!pl)
3172                return NULL;
3173
3174        for (i = 0; i < n_pages; i++) {
3175                pl[i].page = alloc_page(GFP_KERNEL);
3176                if (!pl[i].page) {
3177                        dm_integrity_free_page_list(pl);
3178                        return NULL;
3179                }
3180                if (i)
3181                        pl[i - 1].next = &pl[i];
3182        }
3183        pl[i].page = NULL;
3184        pl[i].next = NULL;
3185
3186        return pl;
3187}
3188
3189static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3190{
3191        unsigned i;
3192        for (i = 0; i < ic->journal_sections; i++)
3193                kvfree(sl[i]);
3194        kvfree(sl);
3195}
3196
3197static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3198                                                                   struct page_list *pl)
3199{
3200        struct scatterlist **sl;
3201        unsigned i;
3202
3203        sl = kvmalloc_array(ic->journal_sections,
3204                            sizeof(struct scatterlist *),
3205                            GFP_KERNEL | __GFP_ZERO);
3206        if (!sl)
3207                return NULL;
3208
3209        for (i = 0; i < ic->journal_sections; i++) {
3210                struct scatterlist *s;
3211                unsigned start_index, start_offset;
3212                unsigned end_index, end_offset;
3213                unsigned n_pages;
3214                unsigned idx;
3215
3216                page_list_location(ic, i, 0, &start_index, &start_offset);
3217                page_list_location(ic, i, ic->journal_section_sectors - 1,
3218                                   &end_index, &end_offset);
3219
3220                n_pages = (end_index - start_index + 1);
3221
3222                s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3223                                   GFP_KERNEL);
3224                if (!s) {
3225                        dm_integrity_free_journal_scatterlist(ic, sl);
3226                        return NULL;
3227                }
3228
3229                sg_init_table(s, n_pages);
3230                for (idx = start_index; idx <= end_index; idx++) {
3231                        char *va = lowmem_page_address(pl[idx].page);
3232                        unsigned start = 0, end = PAGE_SIZE;
3233                        if (idx == start_index)
3234                                start = start_offset;
3235                        if (idx == end_index)
3236                                end = end_offset + (1 << SECTOR_SHIFT);
3237                        sg_set_buf(&s[idx - start_index], va + start, end - start);
3238                }
3239
3240                sl[i] = s;
3241        }
3242
3243        return sl;
3244}
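/*
 * Each journal section occupies a contiguous run of sectors within the
 * journal page list, so the scatterlist built above gets one entry per
 * page the section touches, with the first and last entries trimmed to
 * the section's start and end offsets inside their pages.
 */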
3245
3246static void free_alg(struct alg_spec *a)
3247{
3248        kzfree(a->alg_string);
3249        kzfree(a->key);
3250        memset(a, 0, sizeof *a);
3251}
3252
3253static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3254{
3255        char *k;
3256
3257        free_alg(a);
3258
3259        a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3260        if (!a->alg_string)
3261                goto nomem;
3262
3263        k = strchr(a->alg_string, ':');
3264        if (k) {
3265                *k = 0;
3266                a->key_string = k + 1;
3267                if (strlen(a->key_string) & 1)
3268                        goto inval;
3269
3270                a->key_size = strlen(a->key_string) / 2;
3271                a->key = kmalloc(a->key_size, GFP_KERNEL);
3272                if (!a->key)
3273                        goto nomem;
3274                if (hex2bin(a->key, a->key_string, a->key_size))
3275                        goto inval;
3276        }
3277
3278        return 0;
3279inval:
3280        *error = error_inval;
3281        return -EINVAL;
3282nomem:
3283        *error = "Out of memory for an argument";
3284        return -ENOMEM;
3285}
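/*
 * Example of the parsing above: the option string
 * "internal_hash:hmac(sha256):0011223344556677" yields
 * a->alg_string = "hmac(sha256)" and a->key_string = "0011223344556677",
 * from which a->key becomes the 8 decoded bytes (a->key_size == 8);
 * an odd-length hex string is rejected with -EINVAL.
 */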
3286
3287static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3288                   char *error_alg, char *error_key)
3289{
3290        int r;
3291
3292        if (a->alg_string) {
3293                *hash = crypto_alloc_shash(a->alg_string, 0, 0);
3294                if (IS_ERR(*hash)) {
3295                        *error = error_alg;
3296                        r = PTR_ERR(*hash);
3297                        *hash = NULL;
3298                        return r;
3299                }
3300
3301                if (a->key) {
3302                        r = crypto_shash_setkey(*hash, a->key, a->key_size);
3303                        if (r) {
3304                                *error = error_key;
3305                                return r;
3306                        }
3307                } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3308                        *error = error_key;
3309                        return -ENOKEY;
3310                }
3311        }
3312
3313        return 0;
3314}
3315
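/*
 * create_journal() below supports two encryption layouts: with a stream
 * cipher (blocksize == 1) the keystream is precomputed once into
 * journal_xor (by encrypting zeroed pages) and the skcipher is then
 * freed; with a block cipher, a per-section request with a derived IV
 * is kept in sk_requests[] for on-the-fly section en/decryption.
 */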
3316static int create_journal(struct dm_integrity_c *ic, char **error)
3317{
3318        int r = 0;
3319        unsigned i;
3320        __u64 journal_pages, journal_desc_size, journal_tree_size;
3321        unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3322        struct skcipher_request *req = NULL;
3323
3324        ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3325        ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3326        ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3327        ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3328
3329        journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3330                                PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3331        journal_desc_size = journal_pages * sizeof(struct page_list);
3332        if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3333                *error = "Journal doesn't fit into memory";
3334                r = -ENOMEM;
3335                goto bad;
3336        }
3337        ic->journal_pages = journal_pages;
3338
3339        ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3340        if (!ic->journal) {
3341                *error = "Could not allocate memory for journal";
3342                r = -ENOMEM;
3343                goto bad;
3344        }
3345        if (ic->journal_crypt_alg.alg_string) {
3346                unsigned ivsize, blocksize;
3347                struct journal_completion comp;
3348
3349                comp.ic = ic;
3350                ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
3351                if (IS_ERR(ic->journal_crypt)) {
3352                        *error = "Invalid journal cipher";
3353                        r = PTR_ERR(ic->journal_crypt);
3354                        ic->journal_crypt = NULL;
3355                        goto bad;
3356                }
3357                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3358                blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3359
3360                if (ic->journal_crypt_alg.key) {
3361                        r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3362                                                   ic->journal_crypt_alg.key_size);
3363                        if (r) {
3364                                *error = "Error setting encryption key";
3365                                goto bad;
3366                        }
3367                }
3368                DEBUG_print("cipher %s, block size %u iv size %u\n",
3369                            ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3370
3371                ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3372                if (!ic->journal_io) {
3373                        *error = "Could not allocate memory for journal io";
3374                        r = -ENOMEM;
3375                        goto bad;
3376                }
3377
3378                if (blocksize == 1) {
3379                        struct scatterlist *sg;
3380
3381                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3382                        if (!req) {
3383                                *error = "Could not allocate crypt request";
3384                                r = -ENOMEM;
3385                                goto bad;
3386                        }
3387
3388                        crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3389                        if (!crypt_iv) {
3390                                *error = "Could not allocate iv";
3391                                r = -ENOMEM;
3392                                goto bad;
3393                        }
3394
3395                        ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3396                        if (!ic->journal_xor) {
3397                                *error = "Could not allocate memory for journal xor";
3398                                r = -ENOMEM;
3399                                goto bad;
3400                        }
3401
3402                        sg = kvmalloc_array(ic->journal_pages + 1,
3403                                            sizeof(struct scatterlist),
3404                                            GFP_KERNEL);
3405                        if (!sg) {
3406                                *error = "Unable to allocate sg list";
3407                                r = -ENOMEM;
3408                                goto bad;
3409                        }
3410                        sg_init_table(sg, ic->journal_pages + 1);
3411                        for (i = 0; i < ic->journal_pages; i++) {
3412                                char *va = lowmem_page_address(ic->journal_xor[i].page);
3413                                clear_page(va);
3414                                sg_set_buf(&sg[i], va, PAGE_SIZE);
3415                        }
3416                        sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3417
3418                        skcipher_request_set_crypt(req, sg, sg,
3419                                                   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3420                        init_completion(&comp.comp);
3421                        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3422                        if (do_crypt(true, req, &comp))
3423                                wait_for_completion(&comp.comp);
3424                        kvfree(sg);
3425                        r = dm_integrity_failed(ic);
3426                        if (r) {
3427                                *error = "Unable to encrypt journal";
3428                                goto bad;
3429                        }
3430                        DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3431
3432                        crypto_free_skcipher(ic->journal_crypt);
3433                        ic->journal_crypt = NULL;
3434                } else {
3435                        unsigned crypt_len = roundup(ivsize, blocksize);
3436
3437                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3438                        if (!req) {
3439                                *error = "Could not allocate crypt request";
3440                                r = -ENOMEM;
3441                                goto bad;
3442                        }
3443
3444                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3445                        if (!crypt_iv) {
3446                                *error = "Could not allocate iv";
3447                                r = -ENOMEM;
3448                                goto bad;
3449                        }
3450
3451                        crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3452                        if (!crypt_data) {
3453                                *error = "Unable to allocate crypt data";
3454                                r = -ENOMEM;
3455                                goto bad;
3456                        }
3457
3458                        ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3459                        if (!ic->journal_scatterlist) {
3460                                *error = "Unable to allocate sg list";
3461                                r = -ENOMEM;
3462                                goto bad;
3463                        }
3464                        ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3465                        if (!ic->journal_io_scatterlist) {
3466                                *error = "Unable to allocate sg list";
3467                                r = -ENOMEM;
3468                                goto bad;
3469                        }
3470                        ic->sk_requests = kvmalloc_array(ic->journal_sections,
3471                                                         sizeof(struct skcipher_request *),
3472                                                         GFP_KERNEL | __GFP_ZERO);
3473                        if (!ic->sk_requests) {
3474                                *error = "Unable to allocate sk requests";
3475                                r = -ENOMEM;
3476                                goto bad;
3477                        }
3478                        for (i = 0; i < ic->journal_sections; i++) {
3479                                struct scatterlist sg;
3480                                struct skcipher_request *section_req;
3481                                __le32 section_le = cpu_to_le32(i);
3482
3483                                memset(crypt_iv, 0x00, ivsize);
3484                                memset(crypt_data, 0x00, crypt_len);
3485                                memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3486
3487                                sg_init_one(&sg, crypt_data, crypt_len);
3488                                skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3489                                init_completion(&comp.comp);
3490                                comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3491                                if (do_crypt(true, req, &comp))
3492                                        wait_for_completion(&comp.comp);
3493
3494                                r = dm_integrity_failed(ic);
3495                                if (r) {
3496                                        *error = "Unable to generate iv";
3497                                        goto bad;
3498                                }
3499
3500                                section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3501                                if (!section_req) {
3502                                        *error = "Unable to allocate crypt request";
3503                                        r = -ENOMEM;
3504                                        goto bad;
3505                                }
3506                                section_req->iv = kmalloc_array(ivsize, 2,
3507                                                                GFP_KERNEL);
3508                                if (!section_req->iv) {
3509                                        skcipher_request_free(section_req);
3510                                        *error = "Unable to allocate iv";
3511                                        r = -ENOMEM;
3512                                        goto bad;
3513                                }
3514                                memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3515                                section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3516                                ic->sk_requests[i] = section_req;
3517                                DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3518                        }
3519                }
3520        }
3521
3522        for (i = 0; i < N_COMMIT_IDS; i++) {
3523                unsigned j;
3524retest_commit_id:
3525                for (j = 0; j < i; j++) {
3526                        if (ic->commit_ids[j] == ic->commit_ids[i]) {
3527                                ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3528                                goto retest_commit_id;
3529                        }
3530                }
3531                DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3532        }
3533
3534        journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3535        if (journal_tree_size > ULONG_MAX) {
3536                *error = "Journal doesn't fit into memory";
3537                r = -ENOMEM;
3538                goto bad;
3539        }
3540        ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3541        if (!ic->journal_tree) {
3542                *error = "Could not allocate memory for journal tree";
3543                r = -ENOMEM;
3544        }
3545bad:
3546        kfree(crypt_data);
3547        kfree(crypt_iv);
3548        skcipher_request_free(req);
3549
3550        return r;
3551}
3552
3553/*
3554 * Construct an integrity mapping
3555 *
3556 * Arguments:
3557 *      device
3558 *      offset from the start of the device
3559 *      tag size
3560 *      D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3561 *      number of optional arguments
3562 *      optional arguments:
3563 *              journal_sectors
3564 *              interleave_sectors
3565 *              buffer_sectors
3566 *              journal_watermark
3567 *              commit_time
3568 *              meta_device
3569 *              block_size
3570 *              sectors_per_bit
3571 *              bitmap_flush_interval
3572 *              internal_hash
3573 *              journal_crypt
3574 *              journal_mac
3575 *              recalculate
3576 */
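/*
 * An illustrative target line (hypothetical device and sizes):
 *
 *      dmsetup create int0 --table \
 *        "0 1953125 integrity /dev/sdb 0 4 J 2 journal_sectors:1024 internal_hash:crc32c"
 *
 * i.e. 4-byte tags, journalled writes and two optional arguments.
 */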
3577static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3578{
3579        struct dm_integrity_c *ic;
3580        char dummy;
3581        int r;
3582        unsigned extra_args;
3583        struct dm_arg_set as;
3584        static const struct dm_arg _args[] = {
3585                {0, 9, "Invalid number of feature args"},
3586        };
3587        unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3588        bool should_write_sb;
3589        __u64 threshold;
3590        unsigned long long start;
3591        __s8 log2_sectors_per_bitmap_bit = -1;
3592        __s8 log2_blocks_per_bitmap_bit;
3593        __u64 bits_in_journal;
3594        __u64 n_bitmap_bits;
3595
3596#define DIRECT_ARGUMENTS        4
3597
3598        if (argc <= DIRECT_ARGUMENTS) {
3599                ti->error = "Invalid argument count";
3600                return -EINVAL;
3601        }
3602
3603        ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3604        if (!ic) {
3605                ti->error = "Cannot allocate integrity context";
3606                return -ENOMEM;
3607        }
3608        ti->private = ic;
3609        ti->per_io_data_size = sizeof(struct dm_integrity_io);
3610
3611        ic->in_progress = RB_ROOT;
3612        INIT_LIST_HEAD(&ic->wait_list);
3613        init_waitqueue_head(&ic->endio_wait);
3614        bio_list_init(&ic->flush_bio_list);
3615        init_waitqueue_head(&ic->copy_to_journal_wait);
3616        init_completion(&ic->crypto_backoff);
3617        atomic64_set(&ic->number_of_mismatches, 0);
3618        ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3619
3620        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3621        if (r) {
3622                ti->error = "Device lookup failed";
3623                goto bad;
3624        }
3625
3626        if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3627                ti->error = "Invalid starting offset";
3628                r = -EINVAL;
3629                goto bad;
3630        }
3631        ic->start = start;
3632
3633        if (strcmp(argv[2], "-")) {
3634                if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3635                        ti->error = "Invalid tag size";
3636                        r = -EINVAL;
3637                        goto bad;
3638                }
3639        }
3640
3641        if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
3642            !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
3643                ic->mode = argv[3][0];
3644        } else {
3645                ti->error = "Invalid mode (expecting J, B, D, R)";
3646                r = -EINVAL;
3647                goto bad;
3648        }
3649
3650        journal_sectors = 0;
3651        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3652        buffer_sectors = DEFAULT_BUFFER_SECTORS;
3653        journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3654        sync_msec = DEFAULT_SYNC_MSEC;
3655        ic->sectors_per_block = 1;
3656
3657        as.argc = argc - DIRECT_ARGUMENTS;
3658        as.argv = argv + DIRECT_ARGUMENTS;
3659        r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
3660        if (r)
3661                goto bad;
3662
3663        while (extra_args--) {
3664                const char *opt_string;
3665                unsigned val;
3666                unsigned long long llval;
3667                opt_string = dm_shift_arg(&as);
3668                if (!opt_string) {
3669                        r = -EINVAL;
3670                        ti->error = "Not enough feature arguments";
3671                        goto bad;
3672                }
3673                if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
3674                        journal_sectors = val ? val : 1;
3675                else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
3676                        interleave_sectors = val;
3677                else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
3678                        buffer_sectors = val;
3679                else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
3680                        journal_watermark = val;
3681                else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3682                        sync_msec = val;
3683                else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3684                        if (ic->meta_dev) {
3685                                dm_put_device(ti, ic->meta_dev);
3686                                ic->meta_dev = NULL;
3687                        }
3688                        r = dm_get_device(ti, strchr(opt_string, ':') + 1,
3689                                          dm_table_get_mode(ti->table), &ic->meta_dev);
3690                        if (r) {
3691                                ti->error = "Device lookup failed";
3692                                goto bad;
3693                        }
3694                } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
3695                        if (val < 1 << SECTOR_SHIFT ||
3696                            val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
3697                            (val & (val - 1))) {
3698                                r = -EINVAL;
3699                                ti->error = "Invalid block_size argument";
3700                                goto bad;
3701                        }
3702                        ic->sectors_per_block = val >> SECTOR_SHIFT;
3703                } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
3704                        log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
3705                } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
3706                        if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
3707                                r = -EINVAL;
3708                                ti->error = "Invalid bitmap_flush_interval argument";
                                    goto bad;
3709                        }
3710                        ic->bitmap_flush_interval = msecs_to_jiffies(val);
3711                } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3712                        r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3713                                            "Invalid internal_hash argument");
3714                        if (r)
3715                                goto bad;
3716                } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3717                        r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3718                                            "Invalid journal_crypt argument");
3719                        if (r)
3720                                goto bad;
3721                } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3722                        r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
3723                                            "Invalid journal_mac argument");
3724                        if (r)
3725                                goto bad;
3726                } else if (!strcmp(opt_string, "recalculate")) {
3727                        ic->recalculate_flag = true;
3728                } else {
3729                        r = -EINVAL;
3730                        ti->error = "Invalid argument";
3731                        goto bad;
3732                }
3733        }
3734
3735        ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3736        if (!ic->meta_dev)
3737                ic->meta_device_sectors = ic->data_device_sectors;
3738        else
3739                ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3740
3741        if (!journal_sectors) {
3742                journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
3743                                      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
3744        }
3745
3746        if (!buffer_sectors)
3747                buffer_sectors = 1;
3748        ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
3749
3750        r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3751                    "Invalid internal hash", "Error setting internal hash key");
3752        if (r)
3753                goto bad;
3754
3755        r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
3756                    "Invalid journal mac", "Error setting journal mac key");
3757        if (r)
3758                goto bad;
3759
3760        if (!ic->tag_size) {
3761                if (!ic->internal_hash) {
3762                        ti->error = "Unknown tag size";
3763                        r = -EINVAL;
3764                        goto bad;
3765                }
3766                ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
3767        }
3768        if (ic->tag_size > MAX_TAG_SIZE) {
3769                ti->error = "Too big tag size";
3770                r = -EINVAL;
3771                goto bad;
3772        }
3773        if (!(ic->tag_size & (ic->tag_size - 1)))
3774                ic->log2_tag_size = __ffs(ic->tag_size);
3775        else
3776                ic->log2_tag_size = -1;
3777
3778        if (ic->mode == 'B' && !ic->internal_hash) {
3779                r = -EINVAL;
3780                ti->error = "Bitmap mode can only be used with internal hash";
3781                goto bad;
3782        }
3783
3784        ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
3785        ic->autocommit_msec = sync_msec;
3786        timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
3787
3788        ic->io = dm_io_client_create();
3789        if (IS_ERR(ic->io)) {
3790                r = PTR_ERR(ic->io);
3791                ic->io = NULL;
3792                ti->error = "Cannot allocate dm io";
3793                goto bad;
3794        }
3795
3796        r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
3797        if (r) {
3798                ti->error = "Cannot allocate mempool";
3799                goto bad;
3800        }
3801
3802        ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
3803                                          WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
3804        if (!ic->metadata_wq) {
3805                ti->error = "Cannot allocate workqueue";
3806                r = -ENOMEM;
3807                goto bad;
3808        }
3809
3810        /*
3811         * If this workqueue were percpu, it would cause bio reordering
3812         * and reduced performance.
3813         */
3814        ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
3815        if (!ic->wait_wq) {
3816                ti->error = "Cannot allocate workqueue";
3817                r = -ENOMEM;
3818                goto bad;
3819        }
3820
3821        ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
3822        if (!ic->commit_wq) {
3823                ti->error = "Cannot allocate workqueue";
3824                r = -ENOMEM;
3825                goto bad;
3826        }
3827        INIT_WORK(&ic->commit_work, integrity_commit);
3828
3829        if (ic->mode == 'J' || ic->mode == 'B') {
3830                ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
3831                if (!ic->writer_wq) {
3832                        ti->error = "Cannot allocate workqueue";
3833                        r = -ENOMEM;
3834                        goto bad;
3835                }
3836                INIT_WORK(&ic->writer_work, integrity_writer);
3837        }
3838
3839        ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
3840        if (!ic->sb) {
3841                r = -ENOMEM;
3842                ti->error = "Cannot allocate superblock area";
3843                goto bad;
3844        }
3845
3846        r = sync_rw_sb(ic, REQ_OP_READ, 0);
3847        if (r) {
3848                ti->error = "Error reading superblock";
3849                goto bad;
3850        }
3851        should_write_sb = false;
3852        if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
3853                if (ic->mode != 'R') {
3854                        if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
3855                                r = -EINVAL;
3856                                ti->error = "The device is not initialized";
3857                                goto bad;
3858                        }
3859                }
3860
3861                r = initialize_superblock(ic, journal_sectors, interleave_sectors);
3862                if (r) {
3863                        ti->error = "Could not initialize superblock";
3864                        goto bad;
3865                }
3866                if (ic->mode != 'R')
3867                        should_write_sb = true;
3868        }
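        /*
         * A missing magic means the device is uninitialized. Unless the
         * whole superblock area reads as zeros, refuse to format it - it may
         * belong to something else. Mode 'R' builds an in-memory superblock
         * but never writes it out.
         */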
3869
3870        if (!ic->sb->version || ic->sb->version > SB_VERSION_3) {
3871                r = -EINVAL;
3872                ti->error = "Unknown version";
3873                goto bad;
3874        }
3875        if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
3876                r = -EINVAL;
3877                ti->error = "Tag size doesn't match the information in superblock";
3878                goto bad;
3879        }
3880        if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
3881                r = -EINVAL;
3882                ti->error = "Block size doesn't match the information in superblock";
3883                goto bad;
3884        }
3885        if (!le32_to_cpu(ic->sb->journal_sections)) {
3886                r = -EINVAL;
3887                ti->error = "Corrupted superblock, journal_sections is 0";
3888                goto bad;
3889        }
3890        /* make sure that ti->max_io_len doesn't overflow */
3891        if (!ic->meta_dev) {
3892                if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
3893                    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
3894                        r = -EINVAL;
3895                        ti->error = "Invalid interleave_sectors in the superblock";
3896                        goto bad;
3897                }
3898        } else {
3899                if (ic->sb->log2_interleave_sectors) {
3900                        r = -EINVAL;
3901                        ti->error = "Invalid interleave_sectors in the superblock";
3902                        goto bad;
3903                }
3904        }
3905        ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3906        if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
3907                /* the assignment above truncated the value: sector_t is narrower than 64 bits */
3908                r = -EINVAL;
3909                ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
3910                goto bad;
3911        }
3912        if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
3913                r = -EINVAL;
3914                ti->error = "Journal mac mismatch";
3915                goto bad;
3916        }
3917
3918try_smaller_buffer:
3919        r = calculate_device_limits(ic);
3920        if (r) {
3921                if (ic->meta_dev) {
3922                        if (ic->log2_buffer_sectors > 3) {
3923                                ic->log2_buffer_sectors--;
3924                                goto try_smaller_buffer;
3925                        }
3926                }
3927                ti->error = "The device is too small";
3928                goto bad;
3929        }
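        /*
         * The retry loop above applies only to a separate metadata device,
         * where the metadata area is fixed: the bufio buffer size is halved
         * (down to 2^3 = 8 sectors) until the computed limits fit.
         */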
3930
3931        if (log2_sectors_per_bitmap_bit < 0)
3932                log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
3933        if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
3934                log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
3935
3936        bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
3937        if (bits_in_journal > UINT_MAX)
3938                bits_in_journal = UINT_MAX;
3939        while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
3940                log2_sectors_per_bitmap_bit++;
3941
3942        log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
3943        ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
3944        if (should_write_sb) {
3945                ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
3946        }
3947        n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
3948                                + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
3949        ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
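        /*
         * Each bitmap bit tracks 2^log2_sectors_per_bitmap_bit data sectors.
         * The bitmap occupies the on-disk space otherwise used by the
         * journal, so the granularity was coarsened above until the bitmap
         * fits into bits_in_journal; n_bitmap_blocks is the number of
         * BITMAP_BLOCK_SIZE (4096-byte) blocks needed to hold it.
         */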
3950
3951        if (!ic->meta_dev)
3952                ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
3953
3954        if (ti->len > ic->provided_data_sectors) {
3955                r = -EINVAL;
3956                ti->error = "Not enough provided sectors for requested mapping size";
3957                goto bad;
3958        }
3959
3961        threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
3962        threshold += 50;
3963        do_div(threshold, 100);
3964        ic->free_sectors_threshold = threshold;
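        /*
         * free_sectors_threshold = journal_entries * (100 - watermark) / 100,
         * rounded to nearest by the +50. Once free journal space drops to
         * this level - i.e. the journal is journal_watermark percent full -
         * a commit is triggered.
         */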
3965
3966        DEBUG_print("initialized:\n");
3967        DEBUG_print("   integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
3968        DEBUG_print("   journal_entry_size %u\n", ic->journal_entry_size);
3969        DEBUG_print("   journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
3970        DEBUG_print("   journal_section_entries %u\n", ic->journal_section_entries);
3971        DEBUG_print("   journal_section_sectors %u\n", ic->journal_section_sectors);
3972        DEBUG_print("   journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
3973        DEBUG_print("   journal_entries %u\n", ic->journal_entries);
3974        DEBUG_print("   log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
3975        DEBUG_print("   data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
3976        DEBUG_print("   initial_sectors 0x%x\n", ic->initial_sectors);
3977        DEBUG_print("   metadata_run 0x%x\n", ic->metadata_run);
3978        DEBUG_print("   log2_metadata_run %d\n", ic->log2_metadata_run);
3979        DEBUG_print("   provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
3980                    (unsigned long long)ic->provided_data_sectors);
3981        DEBUG_print("   log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
3982        DEBUG_print("   bits_in_journal %llu\n", (unsigned long long)bits_in_journal);
3983
3984        if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
3985                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3986                ic->sb->recalc_sector = cpu_to_le64(0);
3987        }
3988
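        /*
         * With an internal hash the target can recompute tags by itself, so
         * set up the background recalculation machinery: each pass covers
         * RECALC_SECTORS (8192 sectors, i.e. 4 MiB) of data plus one tag per
         * block.
         */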
3989        if (ic->internal_hash) {
3990                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
3991                if (!ic->recalc_wq) {
3992                        ti->error = "Cannot allocate workqueue";
3993                        r = -ENOMEM;
3994                        goto bad;
3995                }
3996                INIT_WORK(&ic->recalc_work, integrity_recalc);
3997                ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
3998                if (!ic->recalc_buffer) {
3999                        ti->error = "Cannot allocate buffer for recalculating";
4000                        r = -ENOMEM;
4001                        goto bad;
4002                }
4003                ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
4004                                                 ic->tag_size, GFP_KERNEL);
4005                if (!ic->recalc_tags) {
4006                        ti->error = "Cannot allocate tags for recalculating";
4007                        r = -ENOMEM;
4008                        goto bad;
4009                }
4010        }
4011
4012        ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4013                        1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
4014        if (IS_ERR(ic->bufio)) {
4015                r = PTR_ERR(ic->bufio);
4016                ti->error = "Cannot initialize dm-bufio";
4017                ic->bufio = NULL;
4018                goto bad;
4019        }
4020        dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
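        /*
         * dm-bufio mediates access to the on-disk tag area in buffers of
         * 2^log2_buffer_sectors sectors; the sector offset shifts buffer 0
         * to the first metadata sector, past the superblock and journal.
         */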
4021
4022        if (ic->mode != 'R') {
4023                r = create_journal(ic, &ti->error);
4024                if (r)
4025                        goto bad;
4026        }
4028
4029        if (ic->mode == 'B') {
4030                unsigned i;
4031                unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4032
4033                ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4034                if (!ic->recalc_bitmap) {
4035                        r = -ENOMEM;
4036                        goto bad;
4037                }
4038                ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4039                if (!ic->may_write_bitmap) {
4040                        r = -ENOMEM;
4041                        goto bad;
4042                }
4043                ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4044                if (!ic->bbs) {
4045                        r = -ENOMEM;
4046                        goto bad;
4047                }
4048                INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4049                for (i = 0; i < ic->n_bitmap_blocks; i++) {
4050                        struct bitmap_block_status *bbs = &ic->bbs[i];
4051                        unsigned sector, pl_index, pl_offset;
4052
4053                        INIT_WORK(&bbs->work, bitmap_block_work);
4054                        bbs->ic = ic;
4055                        bbs->idx = i;
4056                        bio_list_init(&bbs->bio_queue);
4057                        spin_lock_init(&bbs->bio_queue_lock);
4058
4059                        sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4060                        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4061                        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4062
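                        /*
                         * Locate bitmap block i inside the ic->journal page
                         * list (those pages hold the bitmap in this mode):
                         * pl_index selects the page, pl_offset the byte
                         * offset within it.
                         */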
4063                        bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4064                }
4065        }
4066
4067        if (should_write_sb) {
4068                /* do not redeclare r here: a shadowing local would cause the
4069                 * error set below to be lost at the "bad" label */
4070                init_journal(ic, 0, ic->journal_sections, 0);
4071                r = dm_integrity_failed(ic);
4072                if (unlikely(r)) {
4073                        ti->error = "Error initializing journal";
4074                        goto bad;
4075                }
4076                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4077                if (r) {
4078                        ti->error = "Error initializing superblock";
4079                        goto bad;
4080                }
4081                ic->just_formatted = true;
4082        }
4083
4084        if (!ic->meta_dev) {
4085                r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4086                if (r)
4087                        goto bad;
4088        }
4089        if (ic->mode == 'B') {
4090                unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4091                if (!max_io_len)
4092                        max_io_len = 1U << 31;
4093                DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4094                if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4095                        r = dm_set_target_max_io_len(ti, max_io_len);
4096                        if (r)
4097                                goto bad;
4098                }
4099        }
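        /*
         * Capping max_io_len to the span of one bitmap block makes dm core
         * split bios so that each one maps to a single bitmap_block_status
         * queue.
         */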
4100
4101        if (!ic->internal_hash)
4102                dm_integrity_set(ti, ic);
4103
4104        ti->num_flush_bios = 1;
4105        ti->flush_supported = true;
4106
4107        return 0;
4108
4109bad:
4110        dm_integrity_dtr(ti);
4111        return r;
4112}
4113
4114static void dm_integrity_dtr(struct dm_target *ti)
4115{
4116        struct dm_integrity_c *ic = ti->private;
4117
4118        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4119        BUG_ON(!list_empty(&ic->wait_list));
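        /*
         * The constructor calls this destructor on all of its error paths,
         * so the target may be only partially set up: every resource is
         * checked (or its free routine tolerates NULL) before release.
         */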
4120
4121        if (ic->metadata_wq)
4122                destroy_workqueue(ic->metadata_wq);
4123        if (ic->wait_wq)
4124                destroy_workqueue(ic->wait_wq);
4125        if (ic->commit_wq)
4126                destroy_workqueue(ic->commit_wq);
4127        if (ic->writer_wq)
4128                destroy_workqueue(ic->writer_wq);
4129        if (ic->recalc_wq)
4130                destroy_workqueue(ic->recalc_wq);
4131        vfree(ic->recalc_buffer);
4132        kvfree(ic->recalc_tags);
4133        kvfree(ic->bbs);
4134        if (ic->bufio)
4135                dm_bufio_client_destroy(ic->bufio);
4136        mempool_exit(&ic->journal_io_mempool);
4137        if (ic->io)
4138                dm_io_client_destroy(ic->io);
4139        if (ic->dev)
4140                dm_put_device(ti, ic->dev);
4141        if (ic->meta_dev)
4142                dm_put_device(ti, ic->meta_dev);
4143        dm_integrity_free_page_list(ic->journal);
4144        dm_integrity_free_page_list(ic->journal_io);
4145        dm_integrity_free_page_list(ic->journal_xor);
4146        dm_integrity_free_page_list(ic->recalc_bitmap);
4147        dm_integrity_free_page_list(ic->may_write_bitmap);
4148        if (ic->journal_scatterlist)
4149                dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4150        if (ic->journal_io_scatterlist)
4151                dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4152        if (ic->sk_requests) {
4153                unsigned i;
4154
4155                for (i = 0; i < ic->journal_sections; i++) {
4156                        struct skcipher_request *req = ic->sk_requests[i];
4157                        if (req) {
4158                                kzfree(req->iv);
4159                                skcipher_request_free(req);
4160                        }
4161                }
4162                kvfree(ic->sk_requests);
4163        }
4164        kvfree(ic->journal_tree);
4165        if (ic->sb)
4166                free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4167
4168        if (ic->internal_hash)
4169                crypto_free_shash(ic->internal_hash);
4170        free_alg(&ic->internal_hash_alg);
4171
4172        if (ic->journal_crypt)
4173                crypto_free_skcipher(ic->journal_crypt);
4174        free_alg(&ic->journal_crypt_alg);
4175
4176        if (ic->journal_mac)
4177                crypto_free_shash(ic->journal_mac);
4178        free_alg(&ic->journal_mac_alg);
4179
4180        kfree(ic);
4181}
4182
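/*
 * DM_TARGET_SINGLETON: a table may contain only this one target.
 * DM_TARGET_INTEGRITY: the target implements its own bio data integrity.
 */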
4183static struct target_type integrity_target = {
4184        .name                   = "integrity",
4185        .version                = {1, 3, 0},
4186        .module                 = THIS_MODULE,
4187        .features               = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4188        .ctr                    = dm_integrity_ctr,
4189        .dtr                    = dm_integrity_dtr,
4190        .map                    = dm_integrity_map,
4191        .postsuspend            = dm_integrity_postsuspend,
4192        .resume                 = dm_integrity_resume,
4193        .status                 = dm_integrity_status,
4194        .iterate_devices        = dm_integrity_iterate_devices,
4195        .io_hints               = dm_integrity_io_hints,
4196};
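/*
 * Illustrative table line (values are examples only; the constructor above
 * takes <dev> <start> <tag_size> <mode> followed by optional arguments):
 *
 *   dmsetup create int --table \
 *     "0 1953792 integrity /dev/sdb 0 4 J 1 internal_hash:crc32c"
 */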
4197
4198static int __init dm_integrity_init(void)
4199{
4200        int r;
4201
4202        journal_io_cache = kmem_cache_create("integrity_journal_io",
4203                                             sizeof(struct journal_io), 0, 0, NULL);
4204        if (!journal_io_cache) {
4205                DMERR("can't allocate journal io cache");
4206                return -ENOMEM;
4207        }
4208
4209        r = dm_register_target(&integrity_target);
4210
4211        if (r < 0)
4212                DMERR("register failed %d", r);
4213
4214        return r;
4215}
4216
4217static void __exit dm_integrity_exit(void)
4218{
4219        dm_unregister_target(&integrity_target);
4220        kmem_cache_destroy(journal_io_cache);
4221}
4222
4223module_init(dm_integrity_init);
4224module_exit(dm_integrity_exit);
4225
4226MODULE_AUTHOR("Milan Broz");
4227MODULE_AUTHOR("Mikulas Patocka");
4228MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4229MODULE_LICENSE("GPL");
4230