linux/drivers/md/dm-integrity.c
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS      32768
#define DEFAULT_JOURNAL_SIZE_FACTOR     7
#define DEFAULT_SECTORS_PER_BITMAP_BIT  32768
#define DEFAULT_BUFFER_SECTORS          128
#define DEFAULT_JOURNAL_WATERMARK       50
#define DEFAULT_SYNC_MSEC               10000
#define DEFAULT_MAX_JOURNAL_SECTORS     131072
#define MIN_LOG2_INTERLEAVE_SECTORS     3
#define MAX_LOG2_INTERLEAVE_SECTORS     31
#define METADATA_WORKQUEUE_MAX_ACTIVE   16
#define RECALC_SECTORS                  8192
#define RECALC_WRITE_SUPER              16
#define BITMAP_BLOCK_SIZE               4096    /* don't change it */
#define BITMAP_FLUSH_INTERVAL           (10 * HZ)
#define DISCARD_FILLER                  0xf6

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC                        "integrt"
#define SB_VERSION_1                    1
#define SB_VERSION_2                    2
#define SB_VERSION_3                    3
#define SB_VERSION_4                    4
#define SB_SECTORS                      8
#define MAX_SECTORS_PER_BLOCK           8

struct superblock {
        __u8 magic[8];
        __u8 version;
        __u8 log2_interleave_sectors;
        __u16 integrity_tag_size;
        __u32 journal_sections;
        __u64 provided_data_sectors;    /* userspace uses this value */
        __u32 flags;
        __u8 log2_sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;
        __u8 pad[2];
        __u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC        0x1
#define SB_FLAG_RECALCULATING           0x2
#define SB_FLAG_DIRTY_BITMAP            0x4
#define SB_FLAG_FIXED_PADDING           0x8

#define JOURNAL_ENTRY_ROUNDUP           8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR          8

struct journal_entry {
        union {
                struct {
                        __u32 sector_lo;
                        __u32 sector_hi;
                } s;
                __u64 sector;
        } u;
        commit_id_t last_bytes[];
        /* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)               ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

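/*
 * journal_entry_set_sector() must make sector_hi visible last: sector_hi
 * doubles as the entry state marker (-1 = unused, -2 = in progress), so a
 * reader that sees a valid sector_hi must also see a valid sector_lo.  On
 * 64-bit the whole sector is stored with a single WRITE_ONCE(); on 32-bit
 * the low half is written first and smp_wmb() orders it before the
 * WRITE_ONCE() of the high half.
 */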
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)         do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)         do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)            le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)             ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)            do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)         ((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)        do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

#define JOURNAL_BLOCK_SECTORS           8
#define JOURNAL_SECTOR_DATA             ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE                (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

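/*
 * Layout of one 512-byte journal sector: the last 8 bytes hold the commit_id
 * used for crash detection, the 8 bytes before it hold this sector's slice
 * of the section MAC, and the remaining bytes carry journal entries or
 * journaled data.
 */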
struct journal_sector {
        __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
        __u8 mac[JOURNAL_MAC_PER_SECTOR];
        commit_id_t commit_id;
};

#define MAX_TAG_SIZE                    (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS        8

#define N_COMMIT_IDS                    4

static unsigned char prev_commit_seq(unsigned char seq)
{
        return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
        return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
        struct rb_node node;
        sector_t sector;
};

struct alg_spec {
        char *alg_string;
        char *key_string;
        __u8 *key;
        unsigned key_size;
};

struct dm_integrity_c {
        struct dm_dev *dev;
        struct dm_dev *meta_dev;
        unsigned tag_size;
        __s8 log2_tag_size;
        sector_t start;
        mempool_t journal_io_mempool;
        struct dm_io_client *io;
        struct dm_bufio_client *bufio;
        struct workqueue_struct *metadata_wq;
        struct superblock *sb;
        unsigned journal_pages;
        unsigned n_bitmap_blocks;

        struct page_list *journal;
        struct page_list *journal_io;
        struct page_list *journal_xor;
        struct page_list *recalc_bitmap;
        struct page_list *may_write_bitmap;
        struct bitmap_block_status *bbs;
        unsigned bitmap_flush_interval;
        int synchronous_mode;
        struct bio_list synchronous_bios;
        struct delayed_work bitmap_flush_work;

        struct crypto_skcipher *journal_crypt;
        struct scatterlist **journal_scatterlist;
        struct scatterlist **journal_io_scatterlist;
        struct skcipher_request **sk_requests;

        struct crypto_shash *journal_mac;

        struct journal_node *journal_tree;
        struct rb_root journal_tree_root;

        sector_t provided_data_sectors;

        unsigned short journal_entry_size;
        unsigned char journal_entries_per_sector;
        unsigned char journal_section_entries;
        unsigned short journal_section_sectors;
        unsigned journal_sections;
        unsigned journal_entries;
        sector_t data_device_sectors;
        sector_t meta_device_sectors;
        unsigned initial_sectors;
        unsigned metadata_run;
        __s8 log2_metadata_run;
        __u8 log2_buffer_sectors;
        __u8 sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;

        unsigned char mode;

        int failed;

        struct crypto_shash *internal_hash;

        struct dm_target *ti;

        /* these variables are locked with endio_wait.lock */
        struct rb_root in_progress;
        struct list_head wait_list;
        wait_queue_head_t endio_wait;
        struct workqueue_struct *wait_wq;
        struct workqueue_struct *offload_wq;

        unsigned char commit_seq;
        commit_id_t commit_ids[N_COMMIT_IDS];

        unsigned committed_section;
        unsigned n_committed_sections;

        unsigned uncommitted_section;
        unsigned n_uncommitted_sections;

        unsigned free_section;
        unsigned char free_section_entry;
        unsigned free_sectors;

        unsigned free_sectors_threshold;

        struct workqueue_struct *commit_wq;
        struct work_struct commit_work;

        struct workqueue_struct *writer_wq;
        struct work_struct writer_work;

        struct workqueue_struct *recalc_wq;
        struct work_struct recalc_work;
        u8 *recalc_buffer;
        u8 *recalc_tags;

        struct bio_list flush_bio_list;

        unsigned long autocommit_jiffies;
        struct timer_list autocommit_timer;
        unsigned autocommit_msec;

        wait_queue_head_t copy_to_journal_wait;

        struct completion crypto_backoff;

        bool journal_uptodate;
        bool just_formatted;
        bool recalculate_flag;
        bool fix_padding;
        bool discard;

        struct alg_spec internal_hash_alg;
        struct alg_spec journal_crypt_alg;
        struct alg_spec journal_mac_alg;

        atomic64_t number_of_mismatches;

        struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
        sector_t logical_sector;
        sector_t n_sectors;
        bool waiting;
        union {
                struct rb_node node;
                struct {
                        struct task_struct *task;
                        struct list_head wait_entry;
                };
        };
};

struct dm_integrity_io {
        struct work_struct work;

        struct dm_integrity_c *ic;
        enum req_opf op;
        bool fua;

        struct dm_integrity_range range;

        sector_t metadata_block;
        unsigned metadata_offset;

        atomic_t in_flight;
        blk_status_t bi_status;

        struct completion *completion;

        struct dm_bio_details bio_details;
};

struct journal_completion {
        struct dm_integrity_c *ic;
        atomic_t in_flight;
        struct completion comp;
};

struct journal_io {
        struct dm_integrity_range range;
        struct journal_completion *comp;
};

struct bitmap_block_status {
        struct work_struct work;
        struct dm_integrity_c *ic;
        unsigned idx;
        unsigned long *bitmap;
        struct bio_list bio_queue;
        spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL      32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)     printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
        va_list args;
        va_start(args, msg);
        vprintk(msg, args);
        va_end(args);
        if (len)
                pr_cont(":");
        while (len) {
                pr_cont(" %02x", *bytes);
                bytes++;
                len--;
        }
        pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)       __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)                     do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)       do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile; protection is performed by the layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
        .name                   = "DM-DIF-EXT-TAG",
        .generate_fn            = NULL,
        .verify_fn              = NULL,
        .prepare_fn             = dm_integrity_prepare,
        .complete_fn            = dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
        if (err == -EILSEQ)
                atomic64_inc(&ic->number_of_mismatches);
        if (!cmpxchg(&ic->failed, 0, err))
                DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
        return READ_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
                                          unsigned j, unsigned char seq)
{
        /*
         * XOR the commit ID with the section and sector numbers, so that if
         * a piece of the journal is written at the wrong place, it is
         * detected.
         */
        return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

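/*
 * In interleaved mode (no separate metadata device), the data device is
 * divided into fixed-size "areas" of 2^log2_interleave_sectors data sectors,
 * each preceded by the metadata run that describes it.  With a separate
 * metadata device there is a single area and the offset equals the data
 * sector.  For example, with the default interleave of 32768 sectors, data
 * sector 100000 falls into area 3 at offset 1696.
 */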
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
                                sector_t *area, sector_t *offset)
{
        if (!ic->meta_dev) {
                __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
                *area = data_sector >> log2_interleave_sectors;
                *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
        } else {
                *area = 0;
                *offset = data_sector;
        }
}

#define sector_to_block(ic, n)                                          \
do {                                                                    \
        BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));          \
        (n) >>= (ic)->sb->log2_sectors_per_block;                       \
} while (0)

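/*
 * Map (area, offset) to the location of the tag: the return value is the
 * metadata block number in units of the bufio buffer size
 * (2^log2_buffer_sectors sectors), and *metadata_offset is the byte offset
 * of the tag within that buffer.  The power-of-2 tag-size path uses shifts;
 * the generic path uses multiplication.
 */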
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
                                            sector_t offset, unsigned *metadata_offset)
{
        __u64 ms;
        unsigned mo;

        ms = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                ms += area << ic->log2_metadata_run;
        else
                ms += area * ic->metadata_run;
        ms >>= ic->log2_buffer_sectors;

        sector_to_block(ic, offset);

        if (likely(ic->log2_tag_size >= 0)) {
                ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
                mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        } else {
                ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
                mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        }
        *metadata_offset = mo;
        return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
        sector_t result;

        if (ic->meta_dev)
                return offset;

        result = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                result += (area + 1) << ic->log2_metadata_run;
        else
                result += (area + 1) * ic->metadata_run;

        result += (sector_t)ic->initial_sectors + offset;
        result += ic->start;

        return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
        if (unlikely(*sec_ptr >= ic->journal_sections))
                *sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
        if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
                ic->sb->version = SB_VERSION_4;
        else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
                ic->sb->version = SB_VERSION_3;
        else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
                ic->sb->version = SB_VERSION_2;
        else
                ic->sb->version = SB_VERSION_1;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_KMEM;
        io_req.mem.ptr.addr = ic->sb;
        io_req.notify.fn = NULL;
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start;
        io_loc.count = SB_SECTORS;

        if (op == REQ_OP_WRITE)
                sb_set_version(ic);

        return dm_io(&io_req, 1, &io_loc, NULL);
}

#define BITMAP_OP_TEST_ALL_SET          0
#define BITMAP_OP_TEST_ALL_CLEAR        1
#define BITMAP_OP_SET                   2
#define BITMAP_OP_CLEAR                 3

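/*
 * Apply a bitmap operation to the range [sector, sector + n_sectors).  One
 * bitmap bit covers 2^(log2_sectors_per_block + log2_blocks_per_bitmap_bit)
 * sectors.  The TEST_ALL_* modes return false as soon as one bit disagrees;
 * SET and CLEAR always return true.  Whole words (and, for CLEAR, whole
 * pages) are processed at a time when the range allows it.
 */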
static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
                            sector_t sector, sector_t n_sectors, int mode)
{
        unsigned long bit, end_bit, this_end_bit, page, end_page;
        unsigned long *data;

        if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
                DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
                        sector,
                        n_sectors,
                        ic->sb->log2_sectors_per_block,
                        ic->log2_blocks_per_bitmap_bit,
                        mode);
                BUG();
        }

        if (unlikely(!n_sectors))
                return true;

        bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        end_bit = (sector + n_sectors - 1) >>
                (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

        page = bit / (PAGE_SIZE * 8);
        bit %= PAGE_SIZE * 8;

        end_page = end_bit / (PAGE_SIZE * 8);
        end_bit %= PAGE_SIZE * 8;

repeat:
        if (page < end_page) {
                this_end_bit = PAGE_SIZE * 8 - 1;
        } else {
                this_end_bit = end_bit;
        }

        data = lowmem_page_address(bitmap[page].page);

        if (mode == BITMAP_OP_TEST_ALL_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != -1)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (!test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != 0)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = -1;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __set_bit(bit, data);
                        bit++;
                }
        } else if (mode == BITMAP_OP_CLEAR) {
                if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
                        clear_page(data);
                else while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = 0;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __clear_bit(bit, data);
                        bit++;
                }
        } else {
                BUG();
        }

        if (unlikely(page < end_page)) {
                bit = 0;
                page++;
                goto repeat;
        }

        return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
        unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
        unsigned i;

        for (i = 0; i < n_bitmap_pages; i++) {
                unsigned long *dst_data = lowmem_page_address(dst[i].page);
                unsigned long *src_data = lowmem_page_address(src[i].page);
                copy_page(dst_data, src_data);
        }
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
        unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

        BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
        return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                                 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
        unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

        if (unlikely(section >= ic->journal_sections) ||
            unlikely(offset >= limit)) {
                DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
                       function, section, offset, ic->journal_sections, limit);
                BUG();
        }
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                               unsigned *pl_index, unsigned *pl_offset)
{
        unsigned sector;

        access_journal_check(ic, section, offset, false, "page_list_location");

        sector = section * ic->journal_section_sectors + offset;

        *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
                                               unsigned section, unsigned offset, unsigned *n_sectors)
{
        unsigned pl_index, pl_offset;
        char *va;

        page_list_location(ic, section, offset, &pl_index, &pl_offset);

        if (n_sectors)
                *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

        va = lowmem_page_address(pl[pl_index].page);

        return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
        return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        unsigned rel_sector, offset;
        struct journal_sector *js;

        access_journal_check(ic, section, n, true, "access_journal_entry");

        rel_sector = n % JOURNAL_BLOCK_SECTORS;
        offset = n / JOURNAL_BLOCK_SECTORS;

        js = access_journal(ic, section, rel_sector);
        return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        n <<= ic->sb->log2_sectors_per_block;

        n += JOURNAL_BLOCK_SECTORS;

        access_journal_check(ic, section, n, false, "access_journal_data");

        return access_journal(ic, section, n);
}

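/*
 * Compute the MAC over the sector numbers of all journal entries in a
 * section.  If the digest is shorter than JOURNAL_MAC_SIZE, the result is
 * zero-padded; if longer, it is truncated.  rw_section_mac() then spreads
 * the result across the section, JOURNAL_MAC_PER_SECTOR bytes per journal
 * sector, either writing it (wr == true) or verifying it (wr == false).
 */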
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
        SHASH_DESC_ON_STACK(desc, ic->journal_mac);
        int r;
        unsigned j, size;

        desc->tfm = ic->journal_mac;

        r = crypto_shash_init(desc);
        if (unlikely(r)) {
                dm_integrity_io_error(ic, "crypto_shash_init", r);
                goto err;
        }

        for (j = 0; j < ic->journal_section_entries; j++) {
                struct journal_entry *je = access_journal_entry(ic, section, j);
                r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
                if (unlikely(r)) {
                        dm_integrity_io_error(ic, "crypto_shash_update", r);
                        goto err;
                }
        }

        size = crypto_shash_digestsize(ic->journal_mac);

        if (likely(size <= JOURNAL_MAC_SIZE)) {
                r = crypto_shash_final(desc, result);
                if (unlikely(r)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memset(result + size, 0, JOURNAL_MAC_SIZE - size);
        } else {
                __u8 digest[HASH_MAX_DIGESTSIZE];

                if (WARN_ON(size > sizeof(digest))) {
                        dm_integrity_io_error(ic, "digest_size", -EINVAL);
                        goto err;
                }
                r = crypto_shash_final(desc, digest);
                if (unlikely(r)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memcpy(result, digest, JOURNAL_MAC_SIZE);
        }

        return;
err:
        memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
        __u8 result[JOURNAL_MAC_SIZE];
        unsigned j;

        if (!ic->journal_mac)
                return;

        section_mac(ic, section, result);

        for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
                struct journal_sector *js = access_journal(ic, section, j);

                if (likely(wr))
                        memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
                else {
                        if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
                                dm_integrity_io_error(ic, "journal mac", -EILSEQ);
                }
        }
}

static void complete_journal_op(void *context)
{
        struct journal_completion *comp = context;
        BUG_ON(!atomic_read(&comp->in_flight));
        if (likely(atomic_dec_and_test(&comp->in_flight)))
                complete(&comp->comp);
}

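/*
 * "Encrypt" or "decrypt" journal pages by XORing them with a pre-generated
 * keystream kept in ic->journal_xor, offloaded to the async_tx XOR engine.
 * One in_flight reference is taken per page spanned by the range, and on
 * the encrypt path the MAC of each section is written out just before the
 * page containing the start of that section is XORed.
 */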
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                        unsigned n_sections, struct journal_completion *comp)
{
        struct async_submit_ctl submit;
        size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
        unsigned pl_index, pl_offset, section_index;
        struct page_list *source_pl, *target_pl;

        if (likely(encrypt)) {
                source_pl = ic->journal;
                target_pl = ic->journal_io;
        } else {
                source_pl = ic->journal_io;
                target_pl = ic->journal;
        }

        page_list_location(ic, section, 0, &pl_index, &pl_offset);

        atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

        section_index = pl_index;

        do {
                size_t this_step;
                struct page *src_pages[2];
                struct page *dst_page;

                while (unlikely(pl_index == section_index)) {
                        unsigned dummy;
                        if (likely(encrypt))
                                rw_section_mac(ic, section, true);
                        section++;
                        n_sections--;
                        if (!n_sections)
                                break;
                        page_list_location(ic, section, 0, &section_index, &dummy);
                }

                this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
                dst_page = target_pl[pl_index].page;
                src_pages[0] = source_pl[pl_index].page;
                src_pages[1] = ic->journal_xor[pl_index].page;

                async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

                pl_index++;
                pl_offset = 0;
                n_bytes -= this_step;
        } while (n_bytes);

        BUG_ON(n_sections);

        async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
        struct journal_completion *comp = req->data;
        if (unlikely(err)) {
                if (likely(err == -EINPROGRESS)) {
                        complete(&comp->ic->crypto_backoff);
                        return;
                }
                dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
        }
        complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
        int r;
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      complete_journal_encrypt, comp);
        if (likely(encrypt))
                r = crypto_skcipher_encrypt(req);
        else
                r = crypto_skcipher_decrypt(req);
        if (likely(!r))
                return false;
        if (likely(r == -EINPROGRESS))
                return true;
        if (likely(r == -EBUSY)) {
                wait_for_completion(&comp->ic->crypto_backoff);
                reinit_completion(&comp->ic->crypto_backoff);
                return true;
        }
        dm_integrity_io_error(comp->ic, "encrypt", r);
        return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                          unsigned n_sections, struct journal_completion *comp)
{
        struct scatterlist **source_sg;
        struct scatterlist **target_sg;

        atomic_add(2, &comp->in_flight);

        if (likely(encrypt)) {
                source_sg = ic->journal_scatterlist;
                target_sg = ic->journal_io_scatterlist;
        } else {
                source_sg = ic->journal_io_scatterlist;
                target_sg = ic->journal_scatterlist;
        }

        do {
                struct skcipher_request *req;
                unsigned ivsize;
                char *iv;

                if (likely(encrypt))
                        rw_section_mac(ic, section, true);

                req = ic->sk_requests[section];
                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
                iv = req->iv;

                memcpy(iv, iv + ivsize, ivsize);

                req->src = source_sg[section];
                req->dst = target_sg[section];

                if (unlikely(do_crypt(encrypt, req, comp)))
                        atomic_inc(&comp->in_flight);

                section++;
                n_sections--;
        } while (n_sections);

        atomic_dec(&comp->in_flight);
        complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                            unsigned n_sections, struct journal_completion *comp)
{
        if (ic->journal_xor)
                return xor_journal(ic, encrypt, section, n_sections, comp);
        else
                return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
        struct journal_completion *comp = context;
        if (unlikely(error != 0))
                dm_integrity_io_error(comp->ic, "writing journal", -EIO);
        complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
                               unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        unsigned pl_index, pl_offset;
        int r;

        if (unlikely(dm_integrity_failed(ic))) {
                if (comp)
                        complete_journal_io(-1UL, comp);
                return;
        }

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_PAGE_LIST;
        if (ic->journal_io)
                io_req.mem.ptr.pl = &ic->journal_io[pl_index];
        else
                io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        if (likely(comp != NULL)) {
                io_req.notify.fn = complete_journal_io;
                io_req.notify.context = comp;
        } else {
                io_req.notify.fn = NULL;
        }
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start + SB_SECTORS + sector;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
                if (comp) {
                        WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                        complete_journal_io(-1UL, comp);
                }
        }
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
                       unsigned n_sections, struct journal_completion *comp)
{
        unsigned sector, n_sectors;

        sector = section * ic->journal_section_sectors;
        n_sectors = n_sections * ic->journal_section_sectors;

        rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

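/*
 * Write the committed sections to the on-disk journal.  If the range wraps
 * past the end of the journal, it is split into two writes, and encryption
 * of the second part is overlapped with the I/O of the first where possible.
 * io_comp starts with the number of writes in flight (1 or 2).
 */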
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
        struct journal_completion io_comp;
        struct journal_completion crypt_comp_1;
        struct journal_completion crypt_comp_2;
        unsigned i;

        io_comp.ic = ic;
        init_completion(&io_comp.comp);

        if (commit_start + commit_sections <= ic->journal_sections) {
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
                        wait_for_completion_io(&crypt_comp_1.comp);
                } else {
                        for (i = 0; i < commit_sections; i++)
                                rw_section_mac(ic, commit_start + i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
                           commit_sections, &io_comp);
        } else {
                unsigned to_end;
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
                to_end = ic->journal_sections - commit_start;
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
                        if (try_wait_for_completion(&crypt_comp_1.comp)) {
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                reinit_completion(&crypt_comp_1.comp);
                                crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
                                wait_for_completion_io(&crypt_comp_1.comp);
                        } else {
                                crypt_comp_2.ic = ic;
                                init_completion(&crypt_comp_2.comp);
                                crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
                                wait_for_completion_io(&crypt_comp_1.comp);
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                wait_for_completion_io(&crypt_comp_2.comp);
                        }
                } else {
                        for (i = 0; i < to_end; i++)
                                rw_section_mac(ic, commit_start + i, true);
                        rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                        for (i = 0; i < commit_sections - to_end; i++)
                                rw_section_mac(ic, i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
        }

        wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                              unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        int r;
        unsigned sector, pl_index, pl_offset;

        BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

        if (unlikely(dm_integrity_failed(ic))) {
                fn(-1UL, data);
                return;
        }

        sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = REQ_OP_WRITE;
        io_req.bi_op_flags = 0;
        io_req.mem.type = DM_IO_PAGE_LIST;
        io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        io_req.notify.fn = fn;
        io_req.notify.context = data;
        io_req.client = ic->io;
        io_loc.bdev = ic->dev->bdev;
        io_loc.sector = target;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                fn(-1UL, data);
        }
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
        return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
               range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

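/*
 * Range locking: in-flight ranges live in the ic->in_progress rb-tree, keyed
 * by sector so that overlapping ranges cannot coexist.  A range that cannot
 * be added is parked on ic->wait_list; remove_range_unlocked() retries the
 * waiters in FIFO order and wakes each task whose range it managed to
 * insert.  All of this is serialized by endio_wait.lock.
 */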
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
        struct rb_node **n = &ic->in_progress.rb_node;
        struct rb_node *parent;

        BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

        if (likely(check_waiting)) {
                struct dm_integrity_range *range;
                list_for_each_entry(range, &ic->wait_list, wait_entry) {
                        if (unlikely(ranges_overlap(range, new_range)))
                                return false;
                }
        }

        parent = NULL;

        while (*n) {
                struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

                parent = *n;
                if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
                        n = &range->node.rb_left;
                } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
                        n = &range->node.rb_right;
                } else {
                        return false;
                }
        }

        rb_link_node(&new_range->node, parent, n);
        rb_insert_color(&new_range->node, &ic->in_progress);

        return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        rb_erase(&range->node, &ic->in_progress);
        while (unlikely(!list_empty(&ic->wait_list))) {
                struct dm_integrity_range *last_range =
                        list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
                struct task_struct *last_range_task;
                last_range_task = last_range->task;
                list_del(&last_range->wait_entry);
                if (!add_new_range(ic, last_range, false)) {
                        last_range->task = last_range_task;
                        list_add(&last_range->wait_entry, &ic->wait_list);
                        break;
                }
                last_range->waiting = false;
                wake_up_process(last_range_task);
        }
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->endio_wait.lock, flags);
        remove_range_unlocked(ic, range);
        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        new_range->waiting = true;
        list_add_tail(&new_range->wait_entry, &ic->wait_list);
        new_range->task = current;
        do {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&ic->endio_wait.lock);
                io_schedule();
                spin_lock_irq(&ic->endio_wait.lock);
        } while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        if (unlikely(!add_new_range(ic, new_range, true)))
                wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
        RB_CLEAR_NODE(&node->node);
        node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
        struct rb_node **link;
        struct rb_node *parent;

        node->sector = sector;
        BUG_ON(!RB_EMPTY_NODE(&node->node));

        link = &ic->journal_tree_root.rb_node;
        parent = NULL;

        while (*link) {
                struct journal_node *j;
                parent = *link;
                j = container_of(parent, struct journal_node, node);
                if (sector < j->sector)
                        link = &j->node.rb_left;
                else
                        link = &j->node.rb_right;
        }

        rb_link_node(&node->node, parent, link);
        rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        BUG_ON(RB_EMPTY_NODE(&node->node));
        rb_erase(&node->node, &ic->journal_tree_root);
        init_journal_node(node);
}

#define NOT_FOUND       (-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
        struct rb_node *n = ic->journal_tree_root.rb_node;
        unsigned found = NOT_FOUND;
        *next_sector = (sector_t)-1;
        while (n) {
                struct journal_node *j = container_of(n, struct journal_node, node);
                if (sector == j->sector) {
                        found = j - ic->journal_tree;
                }
                if (sector < j->sector) {
                        *next_sector = j->sector;
                        n = j->node.rb_left;
                } else {
                        n = j->node.rb_right;
                }
        }

        return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
        struct journal_node *node, *next_node;
        struct rb_node *next;

        if (unlikely(pos >= ic->journal_entries))
                return false;
        node = &ic->journal_tree[pos];
        if (unlikely(RB_EMPTY_NODE(&node->node)))
                return false;
        if (unlikely(node->sector != sector))
                return false;

        next = rb_next(&node->node);
        if (unlikely(!next))
                return true;

        next_node = container_of(next, struct journal_node, node);
        return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        struct rb_node *next;
        struct journal_node *next_node;
        unsigned next_section;

        BUG_ON(RB_EMPTY_NODE(&node->node));

        next = rb_next(&node->node);
        if (unlikely(!next))
                return false;

        next_node = container_of(next, struct journal_node, node);

        if (next_node->sector != node->sector)
                return false;

        next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
        if (next_section >= ic->committed_section &&
            next_section < ic->committed_section + ic->n_committed_sections)
                return true;
        if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
                return true;

        return false;
}

#define TAG_READ        0
#define TAG_WRITE       1
#define TAG_CMP         2

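/*
 * Read, write or compare a run of tags in the metadata area through bufio.
 * Returns 0 on success and a negative errno on I/O error.  For TAG_CMP, a
 * positive return value reports a mismatch: it is the number of bytes
 * remaining in the run at the first tag that neither matches the expected
 * hash nor looks like discard filler (callers convert it back to the
 * failing sector).
 */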
1301static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
1302                               unsigned *metadata_offset, unsigned total_size, int op)
1303{
1304#define MAY_BE_FILLER           1
1305#define MAY_BE_HASH             2
1306        unsigned hash_offset = 0;
1307        unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1308
1309        do {
1310                unsigned char *data, *dp;
1311                struct dm_buffer *b;
1312                unsigned to_copy;
1313                int r;
1314
1315                r = dm_integrity_failed(ic);
1316                if (unlikely(r))
1317                        return r;
1318
1319                data = dm_bufio_read(ic->bufio, *metadata_block, &b);
1320                if (IS_ERR(data))
1321                        return PTR_ERR(data);
1322
1323                to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
1324                dp = data + *metadata_offset;
1325                if (op == TAG_READ) {
1326                        memcpy(tag, dp, to_copy);
1327                } else if (op == TAG_WRITE) {
1328                        memcpy(dp, tag, to_copy);
1329                        dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
1330                } else {
1331                        /* e.g.: op == TAG_CMP */
1332
1333                        if (likely(is_power_of_2(ic->tag_size))) {
1334                                if (unlikely(memcmp(dp, tag, to_copy)))
1335                                        if (unlikely(!ic->discard) ||
1336                                            unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
1337                                                goto thorough_test;
1338                                }
1339                        } else {
1340                                unsigned i, ts;
1341thorough_test:
1342                                ts = total_size;
1343
1344                                for (i = 0; i < to_copy; i++, ts--) {
1345                                        if (unlikely(dp[i] != tag[i]))
1346                                                may_be &= ~MAY_BE_HASH;
1347                                        if (likely(dp[i] != DISCARD_FILLER))
1348                                                may_be &= ~MAY_BE_FILLER;
1349                                        hash_offset++;
1350                                        if (unlikely(hash_offset == ic->tag_size)) {
1351                                                if (unlikely(!may_be)) {
1352                                                        dm_bufio_release(b);
1353                                                        return ts;
1354                                                }
1355                                                hash_offset = 0;
1356                                                may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1357                                        }
1358                                }
1359                        }
1360                }
1361                dm_bufio_release(b);
1362
1363                tag += to_copy;
1364                *metadata_offset += to_copy;
1365                if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
1366                        (*metadata_block)++;
1367                        *metadata_offset = 0;
1368                }
1369
1370                if (unlikely(!is_power_of_2(ic->tag_size))) {
1371                        hash_offset = (hash_offset + to_copy) % ic->tag_size;
1372                }
1373
1374                total_size -= to_copy;
1375        } while (unlikely(total_size));
1376
1377        return 0;
1378#undef MAY_BE_FILLER
1379#undef MAY_BE_HASH
1380}
1381
1382static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
1383{
1384        int r;
1385        r = dm_bufio_write_dirty_buffers(ic->bufio);
1386        if (unlikely(r))
1387                dm_integrity_io_error(ic, "writing tags", r);
1388}
1389
1390static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1391{
1392        DECLARE_WAITQUEUE(wait, current);
1393        __add_wait_queue(&ic->endio_wait, &wait);
1394        __set_current_state(TASK_UNINTERRUPTIBLE);
1395        spin_unlock_irq(&ic->endio_wait.lock);
1396        io_schedule();
1397        spin_lock_irq(&ic->endio_wait.lock);
1398        __remove_wait_queue(&ic->endio_wait, &wait);
1399}
1400
1401static void autocommit_fn(struct timer_list *t)
1402{
1403        struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1404
1405        if (likely(!dm_integrity_failed(ic)))
1406                queue_work(ic->commit_wq, &ic->commit_work);
1407}
1408
1409static void schedule_autocommit(struct dm_integrity_c *ic)
1410{
1411        if (!timer_pending(&ic->autocommit_timer))
1412                mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1413}
1414
1415static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1416{
1417        struct bio *bio;
1418        unsigned long flags;
1419
1420        spin_lock_irqsave(&ic->endio_wait.lock, flags);
1421        bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1422        bio_list_add(&ic->flush_bio_list, bio);
1423        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1424
1425        queue_work(ic->commit_wq, &ic->commit_work);
1426}
1427
1428static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1429{
1430        int r = dm_integrity_failed(ic);
1431        if (unlikely(r) && !bio->bi_status)
1432                bio->bi_status = errno_to_blk_status(r);
1433        if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1434                unsigned long flags;
1435                spin_lock_irqsave(&ic->endio_wait.lock, flags);
1436                bio_list_add(&ic->synchronous_bios, bio);
1437                queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1438                spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1439                return;
1440        }
1441        bio_endio(bio);
1442}
1443
1444static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1445{
1446        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1447
1448        if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1449                submit_flush_bio(ic, dio);
1450        else
1451                do_endio(ic, bio);
1452}
1453
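/*
 * Drop one in_flight reference. The last reference releases the locked
 * range; if only part of the bio was processed (bio_sectors() !=
 * range.n_sectors), the bio is advanced and requeued through
 * integrity_bio_wait() to process the remainder.
 */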
1454static void dec_in_flight(struct dm_integrity_io *dio)
1455{
1456        if (atomic_dec_and_test(&dio->in_flight)) {
1457                struct dm_integrity_c *ic = dio->ic;
1458                struct bio *bio;
1459
1460                remove_range(ic, &dio->range);
1461
1462                if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
1463                        schedule_autocommit(ic);
1464
1465                bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1466
1467                if (unlikely(dio->bi_status) && !bio->bi_status)
1468                        bio->bi_status = dio->bi_status;
1469                if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1470                        dio->range.logical_sector += dio->range.n_sectors;
1471                        bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1472                        INIT_WORK(&dio->work, integrity_bio_wait);
1473                        queue_work(ic->offload_wq, &dio->work);
1474                        return;
1475                }
1476                do_endio_flush(ic, dio);
1477        }
1478}
1479
1480static void integrity_end_io(struct bio *bio)
1481{
1482        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1483
1484        dm_bio_restore(&dio->bio_details, bio);
1485        if (bio->bi_integrity)
1486                bio->bi_opf |= REQ_INTEGRITY;
1487
1488        if (dio->completion)
1489                complete(dio->completion);
1490
1491        dec_in_flight(dio);
1492}
1493
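/*
 * Compute the internal-hash tag of one block: the digest of the
 * little-endian sector number followed by the block data, i.e. roughly
 * tag = H(le64(sector) || data)[0 .. tag_size - 1]. If the digest is
 * shorter than tag_size (e.g. a 4-byte crc32c in a larger tag), the tail
 * is zero-filled. If the hash unexpectedly fails, the tag is filled with
 * random bytes so a later compare will not silently pass.
 */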
1494static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1495                                      const char *data, char *result)
1496{
1497        __u64 sector_le = cpu_to_le64(sector);
1498        SHASH_DESC_ON_STACK(req, ic->internal_hash);
1499        int r;
1500        unsigned digest_size;
1501
1502        req->tfm = ic->internal_hash;
1503
1504        r = crypto_shash_init(req);
1505        if (unlikely(r < 0)) {
1506                dm_integrity_io_error(ic, "crypto_shash_init", r);
1507                goto failed;
1508        }
1509
1510        r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1511        if (unlikely(r < 0)) {
1512                dm_integrity_io_error(ic, "crypto_shash_update", r);
1513                goto failed;
1514        }
1515
1516        r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1517        if (unlikely(r < 0)) {
1518                dm_integrity_io_error(ic, "crypto_shash_update", r);
1519                goto failed;
1520        }
1521
1522        r = crypto_shash_final(req, result);
1523        if (unlikely(r < 0)) {
1524                dm_integrity_io_error(ic, "crypto_shash_final", r);
1525                goto failed;
1526        }
1527
1528        digest_size = crypto_shash_digestsize(ic->internal_hash);
1529        if (unlikely(digest_size < ic->tag_size))
1530                memset(result + digest_size, 0, ic->tag_size - digest_size);
1531
1532        return;
1533
1534failed:
1535        /* this shouldn't happen anyway, the hash functions have no reason to fail */
1536        get_random_bytes(result, ic->tag_size);
1537}
1538
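/*
 * Work function that processes the metadata tags for one bio. With an
 * internal hash, per-block checksums are computed and either written
 * (TAG_WRITE) or verified against the stored tags (TAG_CMP on reads);
 * discards overwrite the tags with DISCARD_FILLER. Without an internal
 * hash, tags are copied between the bio integrity payload and the
 * metadata area.
 */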
1539static void integrity_metadata(struct work_struct *w)
1540{
1541        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1542        struct dm_integrity_c *ic = dio->ic;
1543
1544        int r;
1545
1546        if (ic->internal_hash) {
1547                struct bvec_iter iter;
1548                struct bio_vec bv;
1549                unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1550                struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1551                char *checksums;
1552                unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1553                char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1554                sector_t sector;
1555                unsigned sectors_to_process;
1556
1557                if (unlikely(ic->mode == 'R'))
1558                        goto skip_io;
1559
1560                if (likely(dio->op != REQ_OP_DISCARD))
1561                        checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1562                                            GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1563                else
1564                        checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1565                if (!checksums) {
1566                        checksums = checksums_onstack;
1567                        if (WARN_ON(extra_space &&
1568                                    digest_size > sizeof(checksums_onstack))) {
1569                                r = -EINVAL;
1570                                goto error;
1571                        }
1572                }
1573
1574                if (unlikely(dio->op == REQ_OP_DISCARD)) {
1575                        sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
1576                        unsigned bi_size = dio->bio_details.bi_iter.bi_size;
1577                        unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1578                        unsigned max_blocks = max_size / ic->tag_size;
1579                        memset(checksums, DISCARD_FILLER, max_size);
1580
1581                        while (bi_size) {
1582                                unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1583                                this_step_blocks = min(this_step_blocks, max_blocks);
1584                                r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1585                                                        this_step_blocks * ic->tag_size, TAG_WRITE);
1586                                if (unlikely(r)) {
1587                                        if (likely(checksums != checksums_onstack))
1588                                                kfree(checksums);
1589                                        goto error;
1590                                }
1591
1597                                bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1598                                bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1599                        }
1600
1601                        if (likely(checksums != checksums_onstack))
1602                                kfree(checksums);
1603                        goto skip_io;
1604                }
1605
1606                sector = dio->range.logical_sector;
1607                sectors_to_process = dio->range.n_sectors;
1608
1609                __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1610                        unsigned pos;
1611                        char *mem, *checksums_ptr;
1612
1613again:
1614                        mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
1615                        pos = 0;
1616                        checksums_ptr = checksums;
1617                        do {
1618                                integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1619                                checksums_ptr += ic->tag_size;
1620                                sectors_to_process -= ic->sectors_per_block;
1621                                pos += ic->sectors_per_block << SECTOR_SHIFT;
1622                                sector += ic->sectors_per_block;
1623                        } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1624                        kunmap_atomic(mem);
1625
1626                        r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1627                                                checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1628                        if (unlikely(r)) {
1629                                if (r > 0) {
1630                                        char b[BDEVNAME_SIZE];
1631                                        DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
1632                                                    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1633                                        r = -EILSEQ;
1634                                        atomic64_inc(&ic->number_of_mismatches);
1635                                }
1636                                if (likely(checksums != checksums_onstack))
1637                                        kfree(checksums);
1638                                goto error;
1639                        }
1640
1641                        if (!sectors_to_process)
1642                                break;
1643
1644                        if (unlikely(pos < bv.bv_len)) {
1645                                bv.bv_offset += pos;
1646                                bv.bv_len -= pos;
1647                                goto again;
1648                        }
1649                }
1650
1651                if (likely(checksums != checksums_onstack))
1652                        kfree(checksums);
1653        } else {
1654                struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1655
1656                if (bip) {
1657                        struct bio_vec biv;
1658                        struct bvec_iter iter;
1659                        unsigned data_to_process = dio->range.n_sectors;
1660                        sector_to_block(ic, data_to_process);
1661                        data_to_process *= ic->tag_size;
1662
1663                        bip_for_each_vec(biv, bip, iter) {
1664                                unsigned char *tag;
1665                                unsigned this_len;
1666
1667                                BUG_ON(PageHighMem(biv.bv_page));
1668                                tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1669                                this_len = min(biv.bv_len, data_to_process);
1670                                r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1671                                                        this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1672                                if (unlikely(r))
1673                                        goto error;
1674                                data_to_process -= this_len;
1675                                if (!data_to_process)
1676                                        break;
1677                        }
1678                }
1679        }
1680skip_io:
1681        dec_in_flight(dio);
1682        return;
1683error:
1684        dio->bi_status = errno_to_blk_status(r);
1685        dec_in_flight(dio);
1686}
1687
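/*
 * Map function: check that the bio is within provided_data_sectors and
 * aligned on sectors_per_block, split discards that would cross a
 * max_io_len boundary, strip REQ_FUA (a flush is issued on completion
 * instead), remap the sector into the interleaved data area and continue
 * in dm_integrity_map_continue().
 */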
1688static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1689{
1690        struct dm_integrity_c *ic = ti->private;
1691        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1692        struct bio_integrity_payload *bip;
1693
1694        sector_t area, offset;
1695
1696        dio->ic = ic;
1697        dio->bi_status = 0;
1698        dio->op = bio_op(bio);
1699
1700        if (unlikely(dio->op == REQ_OP_DISCARD)) {
1701                if (ti->max_io_len) {
1702                        sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1703                        unsigned log2_max_io_len = __fls(ti->max_io_len);
1704                        sector_t start_boundary = sec >> log2_max_io_len;
1705                        sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1706                        if (start_boundary < end_boundary) {
1707                                sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1708                                dm_accept_partial_bio(bio, len);
1709                        }
1710                }
1711        }
1712
1713        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1714                submit_flush_bio(ic, dio);
1715                return DM_MAPIO_SUBMITTED;
1716        }
1717
1718        dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1719        dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1720        if (unlikely(dio->fua)) {
1721                /*
1722                 * Don't pass down the FUA flag because we have to flush
1723                 * the disk cache anyway.
1724                 */
1725                bio->bi_opf &= ~REQ_FUA;
1726        }
1727        if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1728                DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1729                      dio->range.logical_sector, bio_sectors(bio),
1730                      ic->provided_data_sectors);
1731                return DM_MAPIO_KILL;
1732        }
1733        if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1734                DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1735                      ic->sectors_per_block,
1736                      dio->range.logical_sector, bio_sectors(bio));
1737                return DM_MAPIO_KILL;
1738        }
1739
1740        if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1741                struct bvec_iter iter;
1742                struct bio_vec bv;
1743                bio_for_each_segment(bv, bio, iter) {
1744                        if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1745                                DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1746                                        bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1747                                return DM_MAPIO_KILL;
1748                        }
1749                }
1750        }
1751
1752        bip = bio_integrity(bio);
1753        if (!ic->internal_hash) {
1754                if (bip) {
1755                        unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1756                        if (ic->log2_tag_size >= 0)
1757                                wanted_tag_size <<= ic->log2_tag_size;
1758                        else
1759                                wanted_tag_size *= ic->tag_size;
1760                        if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1761                                DMERR("Invalid integrity data size %u, expected %u",
1762                                      bip->bip_iter.bi_size, wanted_tag_size);
1763                                return DM_MAPIO_KILL;
1764                        }
1765                }
1766        } else {
1767                if (unlikely(bip != NULL)) {
1768                        DMERR("Unexpected integrity data when using internal hash");
1769                        return DM_MAPIO_KILL;
1770                }
1771        }
1772
1773        if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1774                return DM_MAPIO_KILL;
1775
1776        get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1777        dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1778        bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1779
1780        dm_integrity_map_continue(dio, true);
1781        return DM_MAPIO_SUBMITTED;
1782}
1783
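/*
 * Copy bio data to or from the in-memory journal. On writes, the block
 * data, the per-sector commit ids (preserved in je->last_bytes) and the
 * tag are stored in the entry before its sector number is published; on
 * reads, the data is reassembled from the journal sectors. Returns true
 * if the bio extends past the allocated entries and the caller must take
 * a new range and retry.
 */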
1784static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1785                                 unsigned journal_section, unsigned journal_entry)
1786{
1787        struct dm_integrity_c *ic = dio->ic;
1788        sector_t logical_sector;
1789        unsigned n_sectors;
1790
1791        logical_sector = dio->range.logical_sector;
1792        n_sectors = dio->range.n_sectors;
1793        do {
1794                struct bio_vec bv = bio_iovec(bio);
1795                char *mem;
1796
1797                if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1798                        bv.bv_len = n_sectors << SECTOR_SHIFT;
1799                n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1800                bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1801retry_kmap:
1802                mem = kmap_atomic(bv.bv_page);
1803                if (likely(dio->op == REQ_OP_WRITE))
1804                        flush_dcache_page(bv.bv_page);
1805
1806                do {
1807                        struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1808
1809                        if (unlikely(dio->op == REQ_OP_READ)) {
1810                                struct journal_sector *js;
1811                                char *mem_ptr;
1812                                unsigned s;
1813
1814                                if (unlikely(journal_entry_is_inprogress(je))) {
1815                                        flush_dcache_page(bv.bv_page);
1816                                        kunmap_atomic(mem);
1817
1818                                        __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1819                                        goto retry_kmap;
1820                                }
1821                                smp_rmb();
1822                                BUG_ON(journal_entry_get_sector(je) != logical_sector);
1823                                js = access_journal_data(ic, journal_section, journal_entry);
1824                                mem_ptr = mem + bv.bv_offset;
1825                                s = 0;
1826                                do {
1827                                        memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1828                                        *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1829                                        js++;
1830                                        mem_ptr += 1 << SECTOR_SHIFT;
1831                                } while (++s < ic->sectors_per_block);
1832#ifdef INTERNAL_VERIFY
1833                                if (ic->internal_hash) {
1834                                        char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1835
1836                                        integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1837                                        if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1838                                                DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1839                                                            logical_sector);
1840                                        }
1841                                }
1842#endif
1843                        }
1844
1845                        if (!ic->internal_hash) {
1846                                struct bio_integrity_payload *bip = bio_integrity(bio);
1847                                unsigned tag_todo = ic->tag_size;
1848                                char *tag_ptr = journal_entry_tag(ic, je);
1849
1850                                if (bip) do {
1851                                        struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1852                                        unsigned tag_now = min(biv.bv_len, tag_todo);
1853                                        char *tag_addr;
1854                                        BUG_ON(PageHighMem(biv.bv_page));
1855                                        tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1856                                        if (likely(dio->op == REQ_OP_WRITE))
1857                                                memcpy(tag_ptr, tag_addr, tag_now);
1858                                        else
1859                                                memcpy(tag_addr, tag_ptr, tag_now);
1860                                        bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
1861                                        tag_ptr += tag_now;
1862                                        tag_todo -= tag_now;
1863                                } while (unlikely(tag_todo)); else {
1864                                        if (likely(dio->op == REQ_OP_WRITE))
1865                                                memset(tag_ptr, 0, tag_todo);
1866                                }
1867                        }
1868
1869                        if (likely(dio->op == REQ_OP_WRITE)) {
1870                                struct journal_sector *js;
1871                                unsigned s;
1872
1873                                js = access_journal_data(ic, journal_section, journal_entry);
1874                                memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1875
1876                                s = 0;
1877                                do {
1878                                        je->last_bytes[s] = js[s].commit_id;
1879                                } while (++s < ic->sectors_per_block);
1880
1881                                if (ic->internal_hash) {
1882                                        unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1883                                        if (unlikely(digest_size > ic->tag_size)) {
1884                                                char checksums_onstack[HASH_MAX_DIGESTSIZE];
1885                                                integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1886                                                memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1887                                        } else
1888                                                integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1889                                }
1890
1891                                journal_entry_set_sector(je, logical_sector);
1892                        }
1893                        logical_sector += ic->sectors_per_block;
1894
1895                        journal_entry++;
1896                        if (unlikely(journal_entry == ic->journal_section_entries)) {
1897                                journal_entry = 0;
1898                                journal_section++;
1899                                wraparound_section(ic, &journal_section);
1900                        }
1901
1902                        bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1903                } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1904
1905                if (unlikely(dio->op == REQ_OP_READ))
1906                        flush_dcache_page(bv.bv_page);
1907                kunmap_atomic(mem);
1908        } while (n_sectors);
1909
1910        if (likely(dio->op == REQ_OP_WRITE)) {
1911                smp_mb();
1912                if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1913                        wake_up(&ic->copy_to_journal_wait);
1914                if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1915                        queue_work(ic->commit_wq, &ic->commit_work);
1916                } else {
1917                        schedule_autocommit(ic);
1918                }
1919        } else {
1920                remove_range(ic, &dio->range);
1921        }
1922
1923        if (unlikely(bio->bi_iter.bi_size)) {
1924                sector_t area, offset;
1925
1926                dio->range.logical_sector = logical_sector;
1927                get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1928                dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1929                return true;
1930        }
1931
1932        return false;
1933}
1934
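/*
 * Main I/O path. In journal mode, writes allocate free journal entries
 * (shrinking the bio if the journal is low on space) and reads are
 * serviced from the journal when the sector is found there; the range
 * tree serializes overlapping I/O. Anything that may sleep is offloaded
 * to a workqueue when called from the map function, because sleeping
 * there could stall bios on current->bio_list.
 */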
1935static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1936{
1937        struct dm_integrity_c *ic = dio->ic;
1938        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1939        unsigned journal_section, journal_entry;
1940        unsigned journal_read_pos;
1941        struct completion read_comp;
1942        bool discard_retried = false;
1943        bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
1944        if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
1945                need_sync_io = true;
1946
1947        if (need_sync_io && from_map) {
1948                INIT_WORK(&dio->work, integrity_bio_wait);
1949                queue_work(ic->offload_wq, &dio->work);
1950                return;
1951        }
1952
1953lock_retry:
1954        spin_lock_irq(&ic->endio_wait.lock);
1955retry:
1956        if (unlikely(dm_integrity_failed(ic))) {
1957                spin_unlock_irq(&ic->endio_wait.lock);
1958                do_endio(ic, bio);
1959                return;
1960        }
1961        dio->range.n_sectors = bio_sectors(bio);
1962        journal_read_pos = NOT_FOUND;
1963        if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
1964                if (dio->op == REQ_OP_WRITE) {
1965                        unsigned next_entry, i, pos;
1966                        unsigned ws, we, range_sectors;
1967
1968                        dio->range.n_sectors = min(dio->range.n_sectors,
1969                                                   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
1970                        if (unlikely(!dio->range.n_sectors)) {
1971                                if (from_map)
1972                                        goto offload_to_thread;
1973                                sleep_on_endio_wait(ic);
1974                                goto retry;
1975                        }
1976                        range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1977                        ic->free_sectors -= range_sectors;
1978                        journal_section = ic->free_section;
1979                        journal_entry = ic->free_section_entry;
1980
1981                        next_entry = ic->free_section_entry + range_sectors;
1982                        ic->free_section_entry = next_entry % ic->journal_section_entries;
1983                        ic->free_section += next_entry / ic->journal_section_entries;
1984                        ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
1985                        wraparound_section(ic, &ic->free_section);
1986
1987                        pos = journal_section * ic->journal_section_entries + journal_entry;
1988                        ws = journal_section;
1989                        we = journal_entry;
1990                        i = 0;
1991                        do {
1992                                struct journal_entry *je;
1993
1994                                add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
1995                                pos++;
1996                                if (unlikely(pos >= ic->journal_entries))
1997                                        pos = 0;
1998
1999                                je = access_journal_entry(ic, ws, we);
2000                                BUG_ON(!journal_entry_is_unused(je));
2001                                journal_entry_set_inprogress(je);
2002                                we++;
2003                                if (unlikely(we == ic->journal_section_entries)) {
2004                                        we = 0;
2005                                        ws++;
2006                                        wraparound_section(ic, &ws);
2007                                }
2008                        } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2009
2010                        spin_unlock_irq(&ic->endio_wait.lock);
2011                        goto journal_read_write;
2012                } else {
2013                        sector_t next_sector;
2014                        journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2015                        if (likely(journal_read_pos == NOT_FOUND)) {
2016                                if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2017                                        dio->range.n_sectors = next_sector - dio->range.logical_sector;
2018                        } else {
2019                                unsigned i;
2020                                unsigned jp = journal_read_pos + 1;
2021                                for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2022                                        if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2023                                                break;
2024                                }
2025                                dio->range.n_sectors = i;
2026                        }
2027                }
2028        }
2029        if (unlikely(!add_new_range(ic, &dio->range, true))) {
2030                /*
2031                 * We must not sleep in the request routine because it could
2032                 * stall bios on current->bio_list.
2033                 * So, we offload the bio to a workqueue if we have to sleep.
2034                 */
2035                if (from_map) {
2036offload_to_thread:
2037                        spin_unlock_irq(&ic->endio_wait.lock);
2038                        INIT_WORK(&dio->work, integrity_bio_wait);
2039                        queue_work(ic->wait_wq, &dio->work);
2040                        return;
2041                }
2042                if (journal_read_pos != NOT_FOUND)
2043                        dio->range.n_sectors = ic->sectors_per_block;
2044                wait_and_add_new_range(ic, &dio->range);
2045                /*
2046                 * wait_and_add_new_range drops the spinlock, so the journal
2047                 * may have been changed arbitrarily. We need to recheck.
2048                 * To simplify the code, we restrict I/O size to just one block.
2049                 */
2050                if (journal_read_pos != NOT_FOUND) {
2051                        sector_t next_sector;
2052                        unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2053                        if (unlikely(new_pos != journal_read_pos)) {
2054                                remove_range_unlocked(ic, &dio->range);
2055                                goto retry;
2056                        }
2057                }
2058        }
2059        if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2060                sector_t next_sector;
2061                unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2062                if (unlikely(new_pos != NOT_FOUND) ||
2063                    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2064                        remove_range_unlocked(ic, &dio->range);
2065                        spin_unlock_irq(&ic->endio_wait.lock);
2066                        queue_work(ic->commit_wq, &ic->commit_work);
2067                        flush_workqueue(ic->commit_wq);
2068                        queue_work(ic->writer_wq, &ic->writer_work);
2069                        flush_workqueue(ic->writer_wq);
2070                        discard_retried = true;
2071                        goto lock_retry;
2072                }
2073        }
2074        spin_unlock_irq(&ic->endio_wait.lock);
2075
2076        if (unlikely(journal_read_pos != NOT_FOUND)) {
2077                journal_section = journal_read_pos / ic->journal_section_entries;
2078                journal_entry = journal_read_pos % ic->journal_section_entries;
2079                goto journal_read_write;
2080        }
2081
2082        if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2083                if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2084                                     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2085                        struct bitmap_block_status *bbs;
2086
2087                        bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2088                        spin_lock(&bbs->bio_queue_lock);
2089                        bio_list_add(&bbs->bio_queue, bio);
2090                        spin_unlock(&bbs->bio_queue_lock);
2091                        queue_work(ic->writer_wq, &bbs->work);
2092                        return;
2093                }
2094        }
2095
2096        dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2097
2098        if (need_sync_io) {
2099                init_completion(&read_comp);
2100                dio->completion = &read_comp;
2101        } else
2102                dio->completion = NULL;
2103
2104        dm_bio_record(&dio->bio_details, bio);
2105        bio_set_dev(bio, ic->dev->bdev);
2106        bio->bi_integrity = NULL;
2107        bio->bi_opf &= ~REQ_INTEGRITY;
2108        bio->bi_end_io = integrity_end_io;
2109        bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2110
2111        if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2112                integrity_metadata(&dio->work);
2113                dm_integrity_flush_buffers(ic);
2114
2115                dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2116                dio->completion = NULL;
2117
2118                submit_bio_noacct(bio);
2119
2120                return;
2121        }
2122
2123        submit_bio_noacct(bio);
2124
2125        if (need_sync_io) {
2126                wait_for_completion_io(&read_comp);
2127                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2128                    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2129                        goto skip_check;
2130                if (ic->mode == 'B') {
2131                        if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2132                                             dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2133                                goto skip_check;
2134                }
2135
2136                if (likely(!bio->bi_status))
2137                        integrity_metadata(&dio->work);
2138                else
2139skip_check:
2140                        dec_in_flight(dio);
2141
2142        } else {
2143                INIT_WORK(&dio->work, integrity_metadata);
2144                queue_work(ic->metadata_wq, &dio->work);
2145        }
2146
2147        return;
2148
2149journal_read_write:
2150        if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2151                goto lock_retry;
2152
2153        do_endio_flush(ic, dio);
2154}
2155
2156
2157static void integrity_bio_wait(struct work_struct *w)
2158{
2159        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2160
2161        dm_integrity_map_continue(dio, false);
2162}
2163
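/*
 * If the current journal section is only partially filled, skip its
 * remaining entries (subtracting them from free_sectors) so that commit
 * always operates on whole sections. The WARN_ON verifies that the
 * section and free-sector accounting still balances.
 */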
2164static void pad_uncommitted(struct dm_integrity_c *ic)
2165{
2166        if (ic->free_section_entry) {
2167                ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2168                ic->free_section_entry = 0;
2169                ic->free_section++;
2170                wraparound_section(ic, &ic->free_section);
2171                ic->n_uncommitted_sections++;
2172        }
2173        if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2174                    (ic->n_uncommitted_sections + ic->n_committed_sections) *
2175                    ic->journal_section_entries + ic->free_sectors)) {
2176                DMCRIT("journal_sections %u, journal_section_entries %u, "
2177                       "n_uncommitted_sections %u, n_committed_sections %u, "
2178                       "journal_section_entries %u, free_sectors %u",
2179                       ic->journal_sections, ic->journal_section_entries,
2180                       ic->n_uncommitted_sections, ic->n_committed_sections,
2181                       ic->journal_section_entries, ic->free_sectors);
2182        }
2183}
2184
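/*
 * Commit work. In journal mode: pad the current section, wait until no
 * journal entry is still in progress, stamp every journal sector with
 * the current commit id, write the sections to the journal area and move
 * them from "uncommitted" to "committed". In other modes, just flush the
 * metadata buffers. Queued flush bios are completed at the end.
 */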
2185static void integrity_commit(struct work_struct *w)
2186{
2187        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2188        unsigned commit_start, commit_sections;
2189        unsigned i, j, n;
2190        struct bio *flushes;
2191
2192        del_timer(&ic->autocommit_timer);
2193
2194        spin_lock_irq(&ic->endio_wait.lock);
2195        flushes = bio_list_get(&ic->flush_bio_list);
2196        if (unlikely(ic->mode != 'J')) {
2197                spin_unlock_irq(&ic->endio_wait.lock);
2198                dm_integrity_flush_buffers(ic);
2199                goto release_flush_bios;
2200        }
2201
2202        pad_uncommitted(ic);
2203        commit_start = ic->uncommitted_section;
2204        commit_sections = ic->n_uncommitted_sections;
2205        spin_unlock_irq(&ic->endio_wait.lock);
2206
2207        if (!commit_sections)
2208                goto release_flush_bios;
2209
2210        i = commit_start;
2211        for (n = 0; n < commit_sections; n++) {
2212                for (j = 0; j < ic->journal_section_entries; j++) {
2213                        struct journal_entry *je;
2214                        je = access_journal_entry(ic, i, j);
2215                        io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2216                }
2217                for (j = 0; j < ic->journal_section_sectors; j++) {
2218                        struct journal_sector *js;
2219                        js = access_journal(ic, i, j);
2220                        js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2221                }
2222                i++;
2223                if (unlikely(i >= ic->journal_sections))
2224                        ic->commit_seq = next_commit_seq(ic->commit_seq);
2225                wraparound_section(ic, &i);
2226        }
2227        smp_rmb();
2228
2229        write_journal(ic, commit_start, commit_sections);
2230
2231        spin_lock_irq(&ic->endio_wait.lock);
2232        ic->uncommitted_section += commit_sections;
2233        wraparound_section(ic, &ic->uncommitted_section);
2234        ic->n_uncommitted_sections -= commit_sections;
2235        ic->n_committed_sections += commit_sections;
2236        spin_unlock_irq(&ic->endio_wait.lock);
2237
2238        if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2239                queue_work(ic->writer_wq, &ic->writer_work);
2240
2241release_flush_bios:
2242        while (flushes) {
2243                struct bio *next = flushes->bi_next;
2244                flushes->bi_next = NULL;
2245                do_endio(ic, flushes);
2246                flushes = next;
2247        }
2248}
2249
2250static void complete_copy_from_journal(unsigned long error, void *context)
2251{
2252        struct journal_io *io = context;
2253        struct journal_completion *comp = io->comp;
2254        struct dm_integrity_c *ic = comp->ic;
2255        remove_range(ic, &io->range);
2256        mempool_free(io, &ic->journal_io_mempool);
2257        if (unlikely(error != 0))
2258                dm_integrity_io_error(ic, "copying from journal", -EIO);
2259        complete_journal_op(comp);
2260}
2261
2262static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2263                               struct journal_entry *je)
2264{
2265        unsigned s = 0;
2266        do {
2267                js->commit_id = je->last_bytes[s];
2268                js++;
2269        } while (++s < ic->sectors_per_block);
2270}
2271
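/*
 * Write committed journal sections to their final location on the data
 * device. Runs of entries that target contiguous sectors are merged into
 * a single copy, entries superseded by a newer committed node are
 * dropped, tags are written to the metadata area and the data is copied
 * out via copy_from_journal(). The same code performs crash replay
 * (from_replay), in which case the tags are also verified.
 */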
2272static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2273                             unsigned write_sections, bool from_replay)
2274{
2275        unsigned i, j, n;
2276        struct journal_completion comp;
2277        struct blk_plug plug;
2278
2279        blk_start_plug(&plug);
2280
2281        comp.ic = ic;
2282        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2283        init_completion(&comp.comp);
2284
2285        i = write_start;
2286        for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2287#ifndef INTERNAL_VERIFY
2288                if (unlikely(from_replay))
2289#endif
2290                        rw_section_mac(ic, i, false);
2291                for (j = 0; j < ic->journal_section_entries; j++) {
2292                        struct journal_entry *je = access_journal_entry(ic, i, j);
2293                        sector_t sec, area, offset;
2294                        unsigned k, l, next_loop;
2295                        sector_t metadata_block;
2296                        unsigned metadata_offset;
2297                        struct journal_io *io;
2298
2299                        if (journal_entry_is_unused(je))
2300                                continue;
2301                        BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2302                        sec = journal_entry_get_sector(je);
2303                        if (unlikely(from_replay)) {
2304                                if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2305                                        dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2306                                        sec &= ~(sector_t)(ic->sectors_per_block - 1);
2307                                }
2308                        }
2309                        if (unlikely(sec >= ic->provided_data_sectors))
2310                                continue;
2311                        get_area_and_offset(ic, sec, &area, &offset);
2312                        restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2313                        for (k = j + 1; k < ic->journal_section_entries; k++) {
2314                                struct journal_entry *je2 = access_journal_entry(ic, i, k);
2315                                sector_t sec2, area2, offset2;
2316                                if (journal_entry_is_unused(je2))
2317                                        break;
2318                                BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2319                                sec2 = journal_entry_get_sector(je2);
2320                                if (unlikely(sec2 >= ic->provided_data_sectors))
2321                                        break;
2322                                get_area_and_offset(ic, sec2, &area2, &offset2);
2323                                if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2324                                        break;
2325                                restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2326                        }
2327                        next_loop = k - 1;
2328
2329                        io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2330                        io->comp = &comp;
2331                        io->range.logical_sector = sec;
2332                        io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2333
2334                        spin_lock_irq(&ic->endio_wait.lock);
2335                        add_new_range_and_wait(ic, &io->range);
2336
2337                        if (likely(!from_replay)) {
2338                                struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2339
2340                                /* don't write if there is a newer committed sector */
2341                                while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2342                                        struct journal_entry *je2 = access_journal_entry(ic, i, j);
2343
2344                                        journal_entry_set_unused(je2);
2345                                        remove_journal_node(ic, &section_node[j]);
2346                                        j++;
2347                                        sec += ic->sectors_per_block;
2348                                        offset += ic->sectors_per_block;
2349                                }
2350                                while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2351                                        struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2352
2353                                        journal_entry_set_unused(je2);
2354                                        remove_journal_node(ic, &section_node[k - 1]);
2355                                        k--;
2356                                }
2357                                if (j == k) {
2358                                        remove_range_unlocked(ic, &io->range);
2359                                        spin_unlock_irq(&ic->endio_wait.lock);
2360                                        mempool_free(io, &ic->journal_io_mempool);
2361                                        goto skip_io;
2362                                }
2363                                for (l = j; l < k; l++) {
2364                                        remove_journal_node(ic, &section_node[l]);
2365                                }
2366                        }
2367                        spin_unlock_irq(&ic->endio_wait.lock);
2368
2369                        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2370                        for (l = j; l < k; l++) {
2371                                int r;
2372                                struct journal_entry *je2 = access_journal_entry(ic, i, l);
2373
2374                                if (
2375#ifndef INTERNAL_VERIFY
2376                                    unlikely(from_replay) &&
2377#endif
2378                                    ic->internal_hash) {
2379                                        char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2380
2381                                        integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2382                                                                  (char *)access_journal_data(ic, i, l), test_tag);
2383                                        if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2384                                                dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2385                                }
2386
2387                                journal_entry_set_unused(je2);
2388                                r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2389                                                        ic->tag_size, TAG_WRITE);
2390                                if (unlikely(r)) {
2391                                        dm_integrity_io_error(ic, "reading tags", r);
2392                                }
2393                        }
2394
2395                        atomic_inc(&comp.in_flight);
2396                        copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2397                                          (k - j) << ic->sb->log2_sectors_per_block,
2398                                          get_data_sector(ic, area, offset),
2399                                          complete_copy_from_journal, io);
2400skip_io:
2401                        j = next_loop;
2402                }
2403        }
2404
2405        dm_bufio_write_dirty_buffers_async(ic->bufio);
2406
2407        blk_finish_plug(&plug);
2408
2409        complete_journal_op(&comp);
2410        wait_for_completion_io(&comp.comp);
2411
2412        dm_integrity_flush_buffers(ic);
2413}
2414
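/*
 * Writer work: write the committed journal sections to the data device
 * with do_journal_write(), then return the freed entries to free_sectors,
 * waking up writers that were blocked on a full journal.
 */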
2415static void integrity_writer(struct work_struct *w)
2416{
2417        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2418        unsigned write_start, write_sections;
2419
2420        unsigned prev_free_sectors;
2421
2422        /* the following test is not needed, but it tests the replay code */
2423        if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
2424                return;
2425
2426        spin_lock_irq(&ic->endio_wait.lock);
2427        write_start = ic->committed_section;
2428        write_sections = ic->n_committed_sections;
2429        spin_unlock_irq(&ic->endio_wait.lock);
2430
2431        if (!write_sections)
2432                return;
2433
2434        do_journal_write(ic, write_start, write_sections, false);
2435
2436        spin_lock_irq(&ic->endio_wait.lock);
2437
2438        ic->committed_section += write_sections;
2439        wraparound_section(ic, &ic->committed_section);
2440        ic->n_committed_sections -= write_sections;
2441
2442        prev_free_sectors = ic->free_sectors;
2443        ic->free_sectors += write_sections * ic->journal_section_entries;
2444        if (unlikely(!prev_free_sectors))
2445                wake_up_locked(&ic->endio_wait);
2446
2447        spin_unlock_irq(&ic->endio_wait.lock);
2448}
2449
2450static void recalc_write_super(struct dm_integrity_c *ic)
2451{
2452        int r;
2453
2454        dm_integrity_flush_buffers(ic);
2455        if (dm_integrity_failed(ic))
2456                return;
2457
2458        r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2459        if (unlikely(r))
2460                dm_integrity_io_error(ic, "writing superblock", r);
2461}
2462
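/*
 * Background recalculation of tags: walk forward from sb->recalc_sector
 * in chunks of up to RECALC_SECTORS, read the data, compute and write
 * the tags, and persist the progress in the superblock every
 * RECALC_WRITE_SUPER chunks. In bitmap mode, blocks whose recalc bit is
 * already clear are skipped.
 */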
2463static void integrity_recalc(struct work_struct *w)
2464{
2465        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2466        struct dm_integrity_range range;
2467        struct dm_io_request io_req;
2468        struct dm_io_region io_loc;
2469        sector_t area, offset;
2470        sector_t metadata_block;
2471        unsigned metadata_offset;
2472        sector_t logical_sector, n_sectors;
2473        __u8 *t;
2474        unsigned i;
2475        int r;
2476        unsigned super_counter = 0;
2477
2478        DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2479
2480        spin_lock_irq(&ic->endio_wait.lock);
2481
2482next_chunk:
2483
2484        if (unlikely(dm_post_suspending(ic->ti)))
2485                goto unlock_ret;
2486
2487        range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2488        if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2489                if (ic->mode == 'B') {
2490                        block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2491                        DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2492                        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2493                }
2494                goto unlock_ret;
2495        }
2496
2497        get_area_and_offset(ic, range.logical_sector, &area, &offset);
2498        range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2499        if (!ic->meta_dev)
2500                range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2501
2502        add_new_range_and_wait(ic, &range);
2503        spin_unlock_irq(&ic->endio_wait.lock);
2504        logical_sector = range.logical_sector;
2505        n_sectors = range.n_sectors;
2506
2507        if (ic->mode == 'B') {
2508                if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2509                        goto advance_and_next;
2510                }
2511                while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2512                                       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2513                        logical_sector += ic->sectors_per_block;
2514                        n_sectors -= ic->sectors_per_block;
2515                        cond_resched();
2516                }
2517                while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2518                                       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2519                        n_sectors -= ic->sectors_per_block;
2520                        cond_resched();
2521                }
2522                get_area_and_offset(ic, logical_sector, &area, &offset);
2523        }
2524
2525        DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2526
2527        if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2528                recalc_write_super(ic);
2529                if (ic->mode == 'B') {
2530                        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2531                }
2532                super_counter = 0;
2533        }
2534
2535        if (unlikely(dm_integrity_failed(ic)))
2536                goto err;
2537
2538        io_req.bi_op = REQ_OP_READ;
2539        io_req.bi_op_flags = 0;
2540        io_req.mem.type = DM_IO_VMA;
2541        io_req.mem.ptr.addr = ic->recalc_buffer;
2542        io_req.notify.fn = NULL;
2543        io_req.client = ic->io;
2544        io_loc.bdev = ic->dev->bdev;
2545        io_loc.sector = get_data_sector(ic, area, offset);
2546        io_loc.count = n_sectors;
2547
2548        r = dm_io(&io_req, 1, &io_loc, NULL);
2549        if (unlikely(r)) {
2550                dm_integrity_io_error(ic, "reading data", r);
2551                goto err;
2552        }
2553
2554        t = ic->recalc_tags;
2555        for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2556                integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2557                t += ic->tag_size;
2558        }
2559
2560        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2561
2562        r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2563        if (unlikely(r)) {
2564                dm_integrity_io_error(ic, "writing tags", r);
2565                goto err;
2566        }
2567
2568        if (ic->mode == 'B') {
2569                sector_t start, end;
2570                start = (range.logical_sector >>
2571                         (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2572                        (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2573                end = ((range.logical_sector + range.n_sectors) >>
2574                       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2575                        (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2576                block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2577        }
2578
2579advance_and_next:
2580        cond_resched();
2581
2582        spin_lock_irq(&ic->endio_wait.lock);
2583        remove_range_unlocked(ic, &range);
2584        ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2585        goto next_chunk;
2586
2587err:
2588        remove_range(ic, &range);
2589        return;
2590
2591unlock_ret:
2592        spin_unlock_irq(&ic->endio_wait.lock);
2593
2594        recalc_write_super(ic);
2595}
2596
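/*
 * Process the bios queued on one bitmap block: if the corresponding bits
 * are already set in may_write_bitmap, resubmit the bio right away;
 * otherwise set the bits in the on-disk bitmap, write that bitmap block
 * with FUA and only then let the waiting bios proceed.
 */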
2597static void bitmap_block_work(struct work_struct *w)
2598{
2599        struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2600        struct dm_integrity_c *ic = bbs->ic;
2601        struct bio *bio;
2602        struct bio_list bio_queue;
2603        struct bio_list waiting;
2604
2605        bio_list_init(&waiting);
2606
2607        spin_lock(&bbs->bio_queue_lock);
2608        bio_queue = bbs->bio_queue;
2609        bio_list_init(&bbs->bio_queue);
2610        spin_unlock(&bbs->bio_queue_lock);
2611
2612        while ((bio = bio_list_pop(&bio_queue))) {
2613                struct dm_integrity_io *dio;
2614
2615                dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2616
2617                if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2618                                    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2619                        remove_range(ic, &dio->range);
2620                        INIT_WORK(&dio->work, integrity_bio_wait);
2621                        queue_work(ic->offload_wq, &dio->work);
2622                } else {
2623                        block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2624                                        dio->range.n_sectors, BITMAP_OP_SET);
2625                        bio_list_add(&waiting, bio);
2626                }
2627        }
2628
2629        if (bio_list_empty(&waiting))
2630                return;
2631
2632        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2633                           bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2634                           BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2635
2636        while ((bio = bio_list_pop(&waiting))) {
2637                struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2638
2639                block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2640                                dio->range.n_sectors, BITMAP_OP_SET);
2641
2642                remove_range(ic, &dio->range);
2643                INIT_WORK(&dio->work, integrity_bio_wait);
2644                queue_work(ic->offload_wq, &dio->work);
2645        }
2646
2647        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2648}
2649
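    /*
     * Periodic bitmap flush: flush dirty buffers, take a range lock over the
     * whole device to wait for outstanding I/O, flush again, clear the
     * in-memory bitmaps up to the recalculated boundary, write the cleared
     * bitmap blocks with FUA and complete any waiting synchronous bios.
     */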
2650static void bitmap_flush_work(struct work_struct *work)
2651{
2652        struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2653        struct dm_integrity_range range;
2654        unsigned long limit;
2655        struct bio *bio;
2656
2657        dm_integrity_flush_buffers(ic);
2658
2659        range.logical_sector = 0;
2660        range.n_sectors = ic->provided_data_sectors;
2661
2662        spin_lock_irq(&ic->endio_wait.lock);
2663        add_new_range_and_wait(ic, &range);
2664        spin_unlock_irq(&ic->endio_wait.lock);
2665
2666        dm_integrity_flush_buffers(ic);
2667        if (ic->meta_dev)
2668                blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
2669
2670        limit = ic->provided_data_sectors;
2671        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2672                limit = le64_to_cpu(ic->sb->recalc_sector)
2673                        >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2674                        << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2675        }
2676        /*DEBUG_print("zeroing journal\n");*/
2677        block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2678        block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2679
2680        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2681                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2682
2683        spin_lock_irq(&ic->endio_wait.lock);
2684        remove_range_unlocked(ic, &range);
2685        while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2686                bio_endio(bio);
2687                spin_unlock_irq(&ic->endio_wait.lock);
2688                spin_lock_irq(&ic->endio_wait.lock);
2689        }
2690        spin_unlock_irq(&ic->endio_wait.lock);
2691}
2692
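    /*
     * Format journal sections [start_section, start_section + n_sections):
     * zero the sector payloads, stamp each sector with the commit id for
     * commit_seq, mark every journal entry unused and write the result out.
     */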
2694static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2695                         unsigned n_sections, unsigned char commit_seq)
2696{
2697        unsigned i, j, n;
2698
2699        if (!n_sections)
2700                return;
2701
2702        for (n = 0; n < n_sections; n++) {
2703                i = start_section + n;
2704                wraparound_section(ic, &i);
2705                for (j = 0; j < ic->journal_section_sectors; j++) {
2706                        struct journal_sector *js = access_journal(ic, i, j);
2707                        memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2708                        js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2709                }
2710                for (j = 0; j < ic->journal_section_entries; j++) {
2711                        struct journal_entry *je = access_journal_entry(ic, i, j);
2712                        journal_entry_set_unused(je);
2713                }
2714        }
2715
2716        write_journal(ic, start_section, n_sections);
2717}
2718
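    /*
     * Map an on-disk commit id back to its commit sequence number
     * (0 .. N_COMMIT_IDS - 1) for the given journal position;
     * returns -EIO if the id matches no sequence number.
     */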
2719static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2720{
2721        unsigned char k;
2722        for (k = 0; k < N_COMMIT_IDS; k++) {
2723                if (dm_integrity_commit_id(ic, i, j, k) == id)
2724                        return k;
2725        }
2726        dm_integrity_io_error(ic, "journal commit id", -EIO);
2727        return -EIO;
2728}
2729
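    /*
     * Replay the journal after activation. Each journal sector carries a
     * commit id derived from its position and a sequence number cycling
     * through N_COMMIT_IDS values. Scanning which sequence numbers occur
     * identifies the sections that were committed last but may not have been
     * written back to the data area; those are replayed. If the commit ids
     * are inconsistent, the journal is erased and reformatted instead.
     */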
2730static void replay_journal(struct dm_integrity_c *ic)
2731{
2732        unsigned i, j;
2733        bool used_commit_ids[N_COMMIT_IDS];
2734        unsigned max_commit_id_sections[N_COMMIT_IDS];
2735        unsigned write_start, write_sections;
2736        unsigned continue_section;
2737        bool journal_empty;
2738        unsigned char unused, last_used, want_commit_seq;
2739
2740        if (ic->mode == 'R')
2741                return;
2742
2743        if (ic->journal_uptodate)
2744                return;
2745
2746        last_used = 0;
2747        write_start = 0;
2748
2749        if (!ic->just_formatted) {
2750                DEBUG_print("reading journal\n");
2751                rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2752                if (ic->journal_io)
2753                        DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2754                if (ic->journal_io) {
2755                        struct journal_completion crypt_comp;
2756                        crypt_comp.ic = ic;
2757                        init_completion(&crypt_comp.comp);
2758                        crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2759                        encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2760                        wait_for_completion(&crypt_comp.comp);
2761                }
2762                DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2763        }
2764
2765        if (dm_integrity_failed(ic))
2766                goto clear_journal;
2767
2768        journal_empty = true;
2769        memset(used_commit_ids, 0, sizeof used_commit_ids);
2770        memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2771        for (i = 0; i < ic->journal_sections; i++) {
2772                for (j = 0; j < ic->journal_section_sectors; j++) {
2773                        int k;
2774                        struct journal_sector *js = access_journal(ic, i, j);
2775                        k = find_commit_seq(ic, i, j, js->commit_id);
2776                        if (k < 0)
2777                                goto clear_journal;
2778                        used_commit_ids[k] = true;
2779                        max_commit_id_sections[k] = i;
2780                }
2781                if (journal_empty) {
2782                        for (j = 0; j < ic->journal_section_entries; j++) {
2783                                struct journal_entry *je = access_journal_entry(ic, i, j);
2784                                if (!journal_entry_is_unused(je)) {
2785                                        journal_empty = false;
2786                                        break;
2787                                }
2788                        }
2789                }
2790        }
2791
2792        if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2793                unused = N_COMMIT_IDS - 1;
2794                while (unused && !used_commit_ids[unused - 1])
2795                        unused--;
2796        } else {
2797                for (unused = 0; unused < N_COMMIT_IDS; unused++)
2798                        if (!used_commit_ids[unused])
2799                                break;
2800                if (unused == N_COMMIT_IDS) {
2801                        dm_integrity_io_error(ic, "journal commit ids", -EIO);
2802                        goto clear_journal;
2803                }
2804        }
2805        DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2806                    unused, used_commit_ids[0], used_commit_ids[1],
2807                    used_commit_ids[2], used_commit_ids[3]);
2808
2809        last_used = prev_commit_seq(unused);
2810        want_commit_seq = prev_commit_seq(last_used);
2811
2812        if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2813                journal_empty = true;
2814
2815        write_start = max_commit_id_sections[last_used] + 1;
2816        if (unlikely(write_start >= ic->journal_sections))
2817                want_commit_seq = next_commit_seq(want_commit_seq);
2818        wraparound_section(ic, &write_start);
2819
2820        i = write_start;
2821        for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2822                for (j = 0; j < ic->journal_section_sectors; j++) {
2823                        struct journal_sector *js = access_journal(ic, i, j);
2824
2825                        if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2826                                /*
2827                                 * This could be caused by a crash during writing.
2828                                 * We won't replay the inconsistent part of the
2829                                 * journal.
2830                                 */
2831                                DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2832                                            i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2833                                goto brk;
2834                        }
2835                }
2836                i++;
2837                if (unlikely(i >= ic->journal_sections))
2838                        want_commit_seq = next_commit_seq(want_commit_seq);
2839                wraparound_section(ic, &i);
2840        }
2841brk:
2842
2843        if (!journal_empty) {
2844                DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2845                            write_sections, write_start, want_commit_seq);
2846                do_journal_write(ic, write_start, write_sections, true);
2847        }
2848
2849        if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2850                continue_section = write_start;
2851                ic->commit_seq = want_commit_seq;
2852                DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2853        } else {
2854                unsigned s;
2855                unsigned char erase_seq;
2856clear_journal:
2857                DEBUG_print("clearing journal\n");
2858
2859                erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2860                s = write_start;
2861                init_journal(ic, s, 1, erase_seq);
2862                s++;
2863                wraparound_section(ic, &s);
2864                if (ic->journal_sections >= 2) {
2865                        init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2866                        s += ic->journal_sections - 2;
2867                        wraparound_section(ic, &s);
2868                        init_journal(ic, s, 1, erase_seq);
2869                }
2870
2871                continue_section = 0;
2872                ic->commit_seq = next_commit_seq(erase_seq);
2873        }
2874
2875        ic->committed_section = continue_section;
2876        ic->n_committed_sections = 0;
2877
2878        ic->uncommitted_section = continue_section;
2879        ic->n_uncommitted_sections = 0;
2880
2881        ic->free_section = continue_section;
2882        ic->free_section_entry = 0;
2883        ic->free_sectors = ic->journal_entries;
2884
2885        ic->journal_tree_root = RB_ROOT;
2886        for (i = 0; i < ic->journal_entries; i++)
2887                init_journal_node(&ic->journal_tree[i]);
2888}
2889
2890static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
2891{
2892        DEBUG_print("dm_integrity_enter_synchronous_mode\n");
2893
2894        if (ic->mode == 'B') {
2895                ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2896                ic->synchronous_mode = 1;
2897
2898                cancel_delayed_work_sync(&ic->bitmap_flush_work);
2899                queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2900                flush_workqueue(ic->commit_wq);
2901        }
2902}
2903
2904static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
2905{
2906        struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2907
2908        DEBUG_print("dm_integrity_reboot\n");
2909
2910        dm_integrity_enter_synchronous_mode(ic);
2911
2912        return NOTIFY_DONE;
2913}
2914
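    /*
     * Suspend: unregister the reboot notifier, stop the autocommit timer,
     * drain the recalculation, commit and writer workqueues so that all
     * outstanding I/O completes, and in bitmap mode write out a clean
     * journal area and clear SB_FLAG_DIRTY_BITMAP so that the next resume
     * needs no bitmap recovery.
     */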
2915static void dm_integrity_postsuspend(struct dm_target *ti)
2916{
2917        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2918        int r;
2919
2920        WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
2921
2922        del_timer_sync(&ic->autocommit_timer);
2923
2924        if (ic->recalc_wq)
2925                drain_workqueue(ic->recalc_wq);
2926
2927        if (ic->mode == 'B')
2928                cancel_delayed_work_sync(&ic->bitmap_flush_work);
2929
2930        queue_work(ic->commit_wq, &ic->commit_work);
2931        drain_workqueue(ic->commit_wq);
2932
2933        if (ic->mode == 'J') {
2934                if (ic->meta_dev)
2935                        queue_work(ic->writer_wq, &ic->writer_work);
2936                drain_workqueue(ic->writer_wq);
2937                dm_integrity_flush_buffers(ic);
2938        }
2939
2940        if (ic->mode == 'B') {
2941                dm_integrity_flush_buffers(ic);
2942#if 1
2943                /* set to 0 to test bitmap replay code */
2944                init_journal(ic, 0, ic->journal_sections, 0);
2945                ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2946                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2947                if (unlikely(r))
2948                        dm_integrity_io_error(ic, "writing superblock", r);
2949#endif
2950        }
2951
2952        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2953
2954        ic->journal_uptodate = true;
2955}
2956
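    /*
     * Resume: extend the bitmap if the device has grown, recover from a
     * dirty bitmap (marking the possibly inconsistent regions for
     * recalculation), replay the journal, restart an interrupted
     * recalculation and register the reboot notifier that switches bitmap
     * mode to synchronous operation on shutdown.
     */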
2957static void dm_integrity_resume(struct dm_target *ti)
2958{
2959        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2960        __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
2961        int r;
2962
2963        DEBUG_print("resume\n");
2964
2965        if (ic->provided_data_sectors != old_provided_data_sectors) {
2966                if (ic->provided_data_sectors > old_provided_data_sectors &&
2967                    ic->mode == 'B' &&
2968                    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
2969                        rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
2970                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2971                        block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
2972                                        ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
2973                        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2974                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2975                }
2976
2977                ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
2978                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2979                if (unlikely(r))
2980                        dm_integrity_io_error(ic, "writing superblock", r);
2981        }
2982
2983        if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
2984                DEBUG_print("resume dirty_bitmap\n");
2985                rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
2986                                   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2987                if (ic->mode == 'B') {
2988                        if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
2989                                block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
2990                                block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
2991                                if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
2992                                                     BITMAP_OP_TEST_ALL_CLEAR)) {
2993                                        ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2994                                        ic->sb->recalc_sector = cpu_to_le64(0);
2995                                }
2996                        } else {
2997                                DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
2998                                            ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
2999                                ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3000                                block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3001                                block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3002                                block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3003                                rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3004                                                   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3005                                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3006                                ic->sb->recalc_sector = cpu_to_le64(0);
3007                        }
3008                } else {
3009                        if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3010                              block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
3011                                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3012                                ic->sb->recalc_sector = cpu_to_le64(0);
3013                        }
3014                        init_journal(ic, 0, ic->journal_sections, 0);
3015                        replay_journal(ic);
3016                        ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3017                }
3018                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3019                if (unlikely(r))
3020                        dm_integrity_io_error(ic, "writing superblock", r);
3021        } else {
3022                replay_journal(ic);
3023                if (ic->mode == 'B') {
3024                        ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3025                        ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3026                        r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3027                        if (unlikely(r))
3028                                dm_integrity_io_error(ic, "writing superblock", r);
3029
3030                        block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3031                        block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3032                        block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3033                        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3034                            le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3035                                block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3036                                                ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3037                                block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3038                                                ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3039                                block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3040                                                ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3041                        }
3042                        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3043                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3044                }
3045        }
3046
3047        DEBUG_print("testing recalc: %x\n", le32_to_cpu(ic->sb->flags));
3048        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3049                __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3050                DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3051                if (recalc_pos < ic->provided_data_sectors) {
3052                        queue_work(ic->recalc_wq, &ic->recalc_work);
3053                } else if (recalc_pos > ic->provided_data_sectors) {
3054                        ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3055                        recalc_write_super(ic);
3056                }
3057        }
3058
3059        ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3060        ic->reboot_notifier.next = NULL;
3061        ic->reboot_notifier.priority = INT_MAX - 1;     /* be notified after md and before hardware drivers */
3062        WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3063
3064#if 0
3065        /* set to 1 to stress test synchronous mode */
3066        dm_integrity_enter_synchronous_mode(ic);
3067#endif
3068}
3069
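    /*
     * STATUSTYPE_INFO reports "<mismatches> <provided_data_sectors>
     * <recalc position or '-'>", e.g. "0 1953792 -" (values illustrative);
     * STATUSTYPE_TABLE reconstructs the constructor arguments.
     */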
3070static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3071                                unsigned status_flags, char *result, unsigned maxlen)
3072{
3073        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3074        unsigned arg_count;
3075        size_t sz = 0;
3076
3077        switch (type) {
3078        case STATUSTYPE_INFO:
3079                DMEMIT("%llu %llu",
3080                        (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3081                        ic->provided_data_sectors);
3082                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3083                        DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3084                else
3085                        DMEMIT(" -");
3086                break;
3087
3088        case STATUSTYPE_TABLE: {
3089                __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3090                watermark_percentage += ic->journal_entries / 2;
3091                do_div(watermark_percentage, ic->journal_entries);
3092                arg_count = 3;
3093                arg_count += !!ic->meta_dev;
3094                arg_count += ic->sectors_per_block != 1;
3095                arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3096                arg_count += ic->discard;
3097                arg_count += ic->mode == 'J';   /* journal_watermark */
3098                arg_count += ic->mode == 'J';   /* commit_time */
3099                arg_count += ic->mode == 'B';   /* sectors_per_bit */
3100                arg_count += ic->mode == 'B';   /* bitmap_flush_interval */
3101                arg_count += !!ic->internal_hash_alg.alg_string;
3102                arg_count += !!ic->journal_crypt_alg.alg_string;
3103                arg_count += !!ic->journal_mac_alg.alg_string;
3104                arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3105                DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3106                       ic->tag_size, ic->mode, arg_count);
3107                if (ic->meta_dev)
3108                        DMEMIT(" meta_device:%s", ic->meta_dev->name);
3109                if (ic->sectors_per_block != 1)
3110                        DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3111                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3112                        DMEMIT(" recalculate");
3113                if (ic->discard)
3114                        DMEMIT(" allow_discards");
3115                DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3116                DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3117                DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3118                if (ic->mode == 'J') {
3119                        DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3120                        DMEMIT(" commit_time:%u", ic->autocommit_msec);
3121                }
3122                if (ic->mode == 'B') {
3123                        DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3124                        DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3125                }
3126                if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3127                        DMEMIT(" fix_padding");
3128
3129#define EMIT_ALG(a, n)                                                  \
3130                do {                                                    \
3131                        if (ic->a.alg_string) {                         \
3132                                DMEMIT(" %s:%s", n, ic->a.alg_string);  \
3133                                if (ic->a.key_string)                   \
3134                                        DMEMIT(":%s", ic->a.key_string);\
3135                        }                                               \
3136                } while (0)
3137                EMIT_ALG(internal_hash_alg, "internal_hash");
3138                EMIT_ALG(journal_crypt_alg, "journal_crypt");
3139                EMIT_ALG(journal_mac_alg, "journal_mac");
3140                break;
3141        }
3142        }
3143}
3144
3145static int dm_integrity_iterate_devices(struct dm_target *ti,
3146                                        iterate_devices_callout_fn fn, void *data)
3147{
3148        struct dm_integrity_c *ic = ti->private;
3149
3150        if (!ic->meta_dev)
3151                return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3152        else
3153                return fn(ti, ic->dev, 0, ti->len, data);
3154}
3155
3156static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3157{
3158        struct dm_integrity_c *ic = ti->private;
3159
3160        if (ic->sectors_per_block > 1) {
3161                limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3162                limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3163                blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3164        }
3165}
3166
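    /*
     * Derive the journal geometry: the entry size (sector number, last_bytes
     * for each block and the tag, rounded up to JOURNAL_ENTRY_ROUNDUP), the
     * entries per journal sector (leaving room for the MAC if one is used)
     * and the number of entries and sectors per section.
     */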
3167static void calculate_journal_section_size(struct dm_integrity_c *ic)
3168{
3169        unsigned sector_space = JOURNAL_SECTOR_DATA;
3170
3171        ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3172        ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3173                                         JOURNAL_ENTRY_ROUNDUP);
3174
3175        if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3176                sector_space -= JOURNAL_MAC_PER_SECTOR;
3177        ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3178        ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3179        ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3180        ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3181}
3182
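    /*
     * Compute the device layout: the sectors occupied by the superblock and
     * journal, and either the interleaved metadata run (tags interleaved
     * with data on one device) or the total metadata size on a separate
     * metadata device. Returns -EINVAL if the layout does not fit.
     */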
3183static int calculate_device_limits(struct dm_integrity_c *ic)
3184{
3185        __u64 initial_sectors;
3186
3187        calculate_journal_section_size(ic);
3188        initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3189        if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3190                return -EINVAL;
3191        ic->initial_sectors = initial_sectors;
3192
3193        if (!ic->meta_dev) {
3194                sector_t last_sector, last_area, last_offset;
3195
3196                /* we have to maintain excessive padding for compatibility with existing volumes */
3197                __u64 metadata_run_padding =
3198                        ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3199                        (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3200                        (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3201
3202                ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3203                                            metadata_run_padding) >> SECTOR_SHIFT;
3204                if (!(ic->metadata_run & (ic->metadata_run - 1)))
3205                        ic->log2_metadata_run = __ffs(ic->metadata_run);
3206                else
3207                        ic->log2_metadata_run = -1;
3208
3209                get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3210                last_sector = get_data_sector(ic, last_area, last_offset);
3211                if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3212                        return -EINVAL;
3213        } else {
3214                __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3215                meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3216                                >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3217                meta_size <<= ic->log2_buffer_sectors;
3218                if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3219                    ic->initial_sectors + meta_size > ic->meta_device_sectors)
3220                        return -EINVAL;
3221                ic->metadata_run = 1;
3222                ic->log2_metadata_run = 0;
3223        }
3224
3225        return 0;
3226}
3227
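    /*
     * Determine the usable data size. Without a separate metadata device,
     * build provided_data_sectors bit by bit from the top, keeping each bit
     * only if the resulting layout still fits; otherwise use the whole data
     * device rounded down to a block boundary.
     */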
3228static void get_provided_data_sectors(struct dm_integrity_c *ic)
3229{
3230        if (!ic->meta_dev) {
3231                int test_bit;
3232                ic->provided_data_sectors = 0;
3233                for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3234                        __u64 prev_data_sectors = ic->provided_data_sectors;
3235
3236                        ic->provided_data_sectors |= (sector_t)1 << test_bit;
3237                        if (calculate_device_limits(ic))
3238                                ic->provided_data_sectors = prev_data_sectors;
3239                }
3240        } else {
3241                ic->provided_data_sectors = ic->data_device_sectors;
3242                ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3243        }
3244}
3245
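    /*
     * Format a fresh superblock. With a separate metadata device the number
     * of journal sections is maximized bit by bit, shrinking the buffer size
     * if not even a single section fits.
     */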
3246static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3247{
3248        unsigned journal_sections;
3249        int test_bit;
3250
3251        memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3252        memcpy(ic->sb->magic, SB_MAGIC, 8);
3253        ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3254        ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3255        if (ic->journal_mac_alg.alg_string)
3256                ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3257
3258        calculate_journal_section_size(ic);
3259        journal_sections = journal_sectors / ic->journal_section_sectors;
3260        if (!journal_sections)
3261                journal_sections = 1;
3262
3263        if (!ic->meta_dev) {
3264                if (ic->fix_padding)
3265                        ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3266                ic->sb->journal_sections = cpu_to_le32(journal_sections);
3267                if (!interleave_sectors)
3268                        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3269                ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3270                ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3271                ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3272
3273                get_provided_data_sectors(ic);
3274                if (!ic->provided_data_sectors)
3275                        return -EINVAL;
3276        } else {
3277                ic->sb->log2_interleave_sectors = 0;
3278
3279                get_provided_data_sectors(ic);
3280                if (!ic->provided_data_sectors)
3281                        return -EINVAL;
3282
3283try_smaller_buffer:
3284                ic->sb->journal_sections = cpu_to_le32(0);
3285                for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3286                        __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3287                        __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3288                        if (test_journal_sections > journal_sections)
3289                                continue;
3290                        ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3291                        if (calculate_device_limits(ic))
3292                                ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3294                }
3295                if (!le32_to_cpu(ic->sb->journal_sections)) {
3296                        if (ic->log2_buffer_sectors > 3) {
3297                                ic->log2_buffer_sectors--;
3298                                goto try_smaller_buffer;
3299                        }
3300                        return -EINVAL;
3301                }
3302        }
3303
3304        ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3305
3306        sb_set_version(ic);
3307
3308        return 0;
3309}
3310
3311static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3312{
3313        struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3314        struct blk_integrity bi;
3315
3316        memset(&bi, 0, sizeof(bi));
3317        bi.profile = &dm_integrity_profile;
3318        bi.tuple_size = ic->tag_size;
3319        bi.tag_size = bi.tuple_size;
3320        bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3321
3322        blk_integrity_register(disk, &bi);
3323        blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3324}
3325
3326static void dm_integrity_free_page_list(struct page_list *pl)
3327{
3328        unsigned i;
3329
3330        if (!pl)
3331                return;
3332        for (i = 0; pl[i].page; i++)
3333                __free_page(pl[i].page);
3334        kvfree(pl);
3335}
3336
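    /*
     * Allocate a NULL-terminated array of n_pages pages chained through
     * page_list.next, in the format that dm-io expects.
     */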
3337static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3338{
3339        struct page_list *pl;
3340        unsigned i;
3341
3342        pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3343        if (!pl)
3344                return NULL;
3345
3346        for (i = 0; i < n_pages; i++) {
3347                pl[i].page = alloc_page(GFP_KERNEL);
3348                if (!pl[i].page) {
3349                        dm_integrity_free_page_list(pl);
3350                        return NULL;
3351                }
3352                if (i)
3353                        pl[i - 1].next = &pl[i];
3354        }
3355        pl[i].page = NULL;
3356        pl[i].next = NULL;
3357
3358        return pl;
3359}
3360
3361static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3362{
3363        unsigned i;
3364        for (i = 0; i < ic->journal_sections; i++)
3365                kvfree(sl[i]);
3366        kvfree(sl);
3367}
3368
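    /*
     * Build one scatterlist per journal section, covering the journal pages
     * (including partial first and last pages) that hold the section's
     * sectors.
     */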
3369static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3370                                                                   struct page_list *pl)
3371{
3372        struct scatterlist **sl;
3373        unsigned i;
3374
3375        sl = kvmalloc_array(ic->journal_sections,
3376                            sizeof(struct scatterlist *),
3377                            GFP_KERNEL | __GFP_ZERO);
3378        if (!sl)
3379                return NULL;
3380
3381        for (i = 0; i < ic->journal_sections; i++) {
3382                struct scatterlist *s;
3383                unsigned start_index, start_offset;
3384                unsigned end_index, end_offset;
3385                unsigned n_pages;
3386                unsigned idx;
3387
3388                page_list_location(ic, i, 0, &start_index, &start_offset);
3389                page_list_location(ic, i, ic->journal_section_sectors - 1,
3390                                   &end_index, &end_offset);
3391
3392                n_pages = (end_index - start_index + 1);
3393
3394                s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3395                                   GFP_KERNEL);
3396                if (!s) {
3397                        dm_integrity_free_journal_scatterlist(ic, sl);
3398                        return NULL;
3399                }
3400
3401                sg_init_table(s, n_pages);
3402                for (idx = start_index; idx <= end_index; idx++) {
3403                        char *va = lowmem_page_address(pl[idx].page);
3404                        unsigned start = 0, end = PAGE_SIZE;
3405                        if (idx == start_index)
3406                                start = start_offset;
3407                        if (idx == end_index)
3408                                end = end_offset + (1 << SECTOR_SHIFT);
3409                        sg_set_buf(&s[idx - start_index], va + start, end - start);
3410                }
3411
3412                sl[i] = s;
3413        }
3414
3415        return sl;
3416}
3417
3418static void free_alg(struct alg_spec *a)
3419{
3420        kfree_sensitive(a->alg_string);
3421        kfree_sensitive(a->key);
3422        memset(a, 0, sizeof *a);
3423}
3424
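    /*
     * Parse an "option:algorithm[:hexkey]" argument: duplicate the algorithm
     * name and, if a key is given, hex-decode it into a->key.
     */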
3425static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3426{
3427        char *k;
3428
3429        free_alg(a);
3430
3431        a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3432        if (!a->alg_string)
3433                goto nomem;
3434
3435        k = strchr(a->alg_string, ':');
3436        if (k) {
3437                *k = 0;
3438                a->key_string = k + 1;
3439                if (strlen(a->key_string) & 1)
3440                        goto inval;
3441
3442                a->key_size = strlen(a->key_string) / 2;
3443                a->key = kmalloc(a->key_size, GFP_KERNEL);
3444                if (!a->key)
3445                        goto nomem;
3446                if (hex2bin(a->key, a->key_string, a->key_size))
3447                        goto inval;
3448        }
3449
3450        return 0;
3451inval:
3452        *error = error_inval;
3453        return -EINVAL;
3454nomem:
3455        *error = "Out of memory for an argument";
3456        return -ENOMEM;
3457}
3458
3459static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3460                   char *error_alg, char *error_key)
3461{
3462        int r;
3463
3464        if (a->alg_string) {
3465                *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3466                if (IS_ERR(*hash)) {
3467                        *error = error_alg;
3468                        r = PTR_ERR(*hash);
3469                        *hash = NULL;
3470                        return r;
3471                }
3472
3473                if (a->key) {
3474                        r = crypto_shash_setkey(*hash, a->key, a->key_size);
3475                        if (r) {
3476                                *error = error_key;
3477                                return r;
3478                        }
3479                } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3480                        *error = error_key;
3481                        return -ENOKEY;
3482                }
3483        }
3484
3485        return 0;
3486}
3487
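    /*
     * Allocate and initialize the in-memory journal. When journal encryption
     * is requested there are two schemes: with a stream cipher (block size
     * 1) a keystream is precomputed into journal_xor and the cipher is then
     * freed, the journal being XORed with it on I/O; with a block cipher a
     * per-section IV is derived by encrypting the section number and a
     * per-section skcipher request is prepared for in-place encryption.
     */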
3488static int create_journal(struct dm_integrity_c *ic, char **error)
3489{
3490        int r = 0;
3491        unsigned i;
3492        __u64 journal_pages, journal_desc_size, journal_tree_size;
3493        unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3494        struct skcipher_request *req = NULL;
3495
3496        ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3497        ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3498        ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3499        ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3500
3501        journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3502                                PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3503        journal_desc_size = journal_pages * sizeof(struct page_list);
3504        if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3505                *error = "Journal doesn't fit into memory";
3506                r = -ENOMEM;
3507                goto bad;
3508        }
3509        ic->journal_pages = journal_pages;
3510
3511        ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3512        if (!ic->journal) {
3513                *error = "Could not allocate memory for journal";
3514                r = -ENOMEM;
3515                goto bad;
3516        }
3517        if (ic->journal_crypt_alg.alg_string) {
3518                unsigned ivsize, blocksize;
3519                struct journal_completion comp;
3520
3521                comp.ic = ic;
3522                ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3523                if (IS_ERR(ic->journal_crypt)) {
3524                        *error = "Invalid journal cipher";
3525                        r = PTR_ERR(ic->journal_crypt);
3526                        ic->journal_crypt = NULL;
3527                        goto bad;
3528                }
3529                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3530                blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3531
3532                if (ic->journal_crypt_alg.key) {
3533                        r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3534                                                   ic->journal_crypt_alg.key_size);
3535                        if (r) {
3536                                *error = "Error setting encryption key";
3537                                goto bad;
3538                        }
3539                }
3540                DEBUG_print("cipher %s, block size %u iv size %u\n",
3541                            ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3542
3543                ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3544                if (!ic->journal_io) {
3545                        *error = "Could not allocate memory for journal io";
3546                        r = -ENOMEM;
3547                        goto bad;
3548                }
3549
3550                if (blocksize == 1) {
3551                        struct scatterlist *sg;
3552
3553                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3554                        if (!req) {
3555                                *error = "Could not allocate crypt request";
3556                                r = -ENOMEM;
3557                                goto bad;
3558                        }
3559
3560                        crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3561                        if (!crypt_iv) {
3562                                *error = "Could not allocate iv";
3563                                r = -ENOMEM;
3564                                goto bad;
3565                        }
3566
3567                        ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3568                        if (!ic->journal_xor) {
3569                                *error = "Could not allocate memory for journal xor";
3570                                r = -ENOMEM;
3571                                goto bad;
3572                        }
3573
3574                        sg = kvmalloc_array(ic->journal_pages + 1,
3575                                            sizeof(struct scatterlist),
3576                                            GFP_KERNEL);
3577                        if (!sg) {
3578                                *error = "Unable to allocate sg list";
3579                                r = -ENOMEM;
3580                                goto bad;
3581                        }
3582                        sg_init_table(sg, ic->journal_pages + 1);
3583                        for (i = 0; i < ic->journal_pages; i++) {
3584                                char *va = lowmem_page_address(ic->journal_xor[i].page);
3585                                clear_page(va);
3586                                sg_set_buf(&sg[i], va, PAGE_SIZE);
3587                        }
3588                        sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3589
3590                        skcipher_request_set_crypt(req, sg, sg,
3591                                                   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3592                        init_completion(&comp.comp);
3593                        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3594                        if (do_crypt(true, req, &comp))
3595                                wait_for_completion(&comp.comp);
3596                        kvfree(sg);
3597                        r = dm_integrity_failed(ic);
3598                        if (r) {
3599                                *error = "Unable to encrypt journal";
3600                                goto bad;
3601                        }
3602                        DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3603
3604                        crypto_free_skcipher(ic->journal_crypt);
3605                        ic->journal_crypt = NULL;
3606                } else {
3607                        unsigned crypt_len = roundup(ivsize, blocksize);
3608
3609                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3610                        if (!req) {
3611                                *error = "Could not allocate crypt request";
3612                                r = -ENOMEM;
3613                                goto bad;
3614                        }
3615
3616                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3617                        if (!crypt_iv) {
3618                                *error = "Could not allocate iv";
3619                                r = -ENOMEM;
3620                                goto bad;
3621                        }
3622
3623                        crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3624                        if (!crypt_data) {
3625                                *error = "Unable to allocate crypt data";
3626                                r = -ENOMEM;
3627                                goto bad;
3628                        }
3629
3630                        ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3631                        if (!ic->journal_scatterlist) {
3632                                *error = "Unable to allocate sg list";
3633                                r = -ENOMEM;
3634                                goto bad;
3635                        }
3636                        ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3637                        if (!ic->journal_io_scatterlist) {
3638                                *error = "Unable to allocate sg list";
3639                                r = -ENOMEM;
3640                                goto bad;
3641                        }
3642                        ic->sk_requests = kvmalloc_array(ic->journal_sections,
3643                                                         sizeof(struct skcipher_request *),
3644                                                         GFP_KERNEL | __GFP_ZERO);
3645                        if (!ic->sk_requests) {
3646                                *error = "Unable to allocate sk requests";
3647                                r = -ENOMEM;
3648                                goto bad;
3649                        }
3650                        for (i = 0; i < ic->journal_sections; i++) {
3651                                struct scatterlist sg;
3652                                struct skcipher_request *section_req;
3653                                __le32 section_le = cpu_to_le32(i);
3654
3655                                memset(crypt_iv, 0x00, ivsize);
3656                                memset(crypt_data, 0x00, crypt_len);
3657                                memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3658
3659                                sg_init_one(&sg, crypt_data, crypt_len);
3660                                skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3661                                init_completion(&comp.comp);
3662                                comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3663                                if (do_crypt(true, req, &comp))
3664                                        wait_for_completion(&comp.comp);
3665
3666                                r = dm_integrity_failed(ic);
3667                                if (r) {
3668                                        *error = "Unable to generate iv";
3669                                        goto bad;
3670                                }
3671
3672                                section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3673                                if (!section_req) {
3674                                        *error = "Unable to allocate crypt request";
3675                                        r = -ENOMEM;
3676                                        goto bad;
3677                                }
3678                                section_req->iv = kmalloc_array(ivsize, 2,
3679                                                                GFP_KERNEL);
3680                                if (!section_req->iv) {
3681                                        skcipher_request_free(section_req);
3682                                        *error = "Unable to allocate iv";
3683                                        r = -ENOMEM;
3684                                        goto bad;
3685                                }
3686                                memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3687                                section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3688                                ic->sk_requests[i] = section_req;
3689                                DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3690                        }
3691                }
3692        }
3693
3694        for (i = 0; i < N_COMMIT_IDS; i++) {
3695                unsigned j;
3696retest_commit_id:
3697                for (j = 0; j < i; j++) {
3698                        if (ic->commit_ids[j] == ic->commit_ids[i]) {
3699                                ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3700                                goto retest_commit_id;
3701                        }
3702                }
3703                DEBUG_print("commit id %u: %016llx\n", i, le64_to_cpu(ic->commit_ids[i]));
3704        }
3705
3706        journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3707        if (journal_tree_size > ULONG_MAX) {
3708                *error = "Journal doesn't fit into memory";
3709                r = -ENOMEM;
3710                goto bad;
3711        }
3712        ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3713        if (!ic->journal_tree) {
3714                *error = "Could not allocate memory for journal tree";
3715                r = -ENOMEM;
3716        }
3717bad:
3718        kfree(crypt_data);
3719        kfree(crypt_iv);
3720        skcipher_request_free(req);
3721
3722        return r;
3723}
3724
3725/*
3726 * Construct an integrity mapping
3727 *
3728 * Arguments:
3729 *      device
3730 *      offset from the start of the device
3731 *      tag size
3732 *      D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3733 *      number of optional arguments
3734 *      optional arguments:
3735 *              journal_sectors
3736 *              interleave_sectors
3737 *              buffer_sectors
3738 *              journal_watermark
3739 *              commit_time
3740 *              meta_device
3741 *              block_size
3742 *              sectors_per_bit
3743 *              bitmap_flush_interval
3744 *              internal_hash
3745 *              journal_crypt
3746 *              journal_mac
3747 *              recalculate
3748 */
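    /*
     * Example (illustrative device and sizes): a journaled target with
     * sha256 checksums could be created with a table line such as
     *      0 <data_sectors> integrity /dev/sdb 0 32 J 2 journal_sectors:1024 internal_hash:sha256
     */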
3749static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3750{
3751        struct dm_integrity_c *ic;
3752        char dummy;
3753        int r;
3754        unsigned extra_args;
3755        struct dm_arg_set as;
3756        static const struct dm_arg _args[] = {
3757                {0, 9, "Invalid number of feature args"},
3758        };
3759        unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3760        bool should_write_sb;
3761        __u64 threshold;
3762        unsigned long long start;
3763        __s8 log2_sectors_per_bitmap_bit = -1;
3764        __s8 log2_blocks_per_bitmap_bit;
3765        __u64 bits_in_journal;
3766        __u64 n_bitmap_bits;
3767
3768#define DIRECT_ARGUMENTS        4
3769
3770        if (argc <= DIRECT_ARGUMENTS) {
3771                ti->error = "Invalid argument count";
3772                return -EINVAL;
3773        }
3774
3775        ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3776        if (!ic) {
3777                ti->error = "Cannot allocate integrity context";
3778                return -ENOMEM;
3779        }
3780        ti->private = ic;
3781        ti->per_io_data_size = sizeof(struct dm_integrity_io);
3782        ic->ti = ti;
3783
3784        ic->in_progress = RB_ROOT;
3785        INIT_LIST_HEAD(&ic->wait_list);
3786        init_waitqueue_head(&ic->endio_wait);
3787        bio_list_init(&ic->flush_bio_list);
3788        init_waitqueue_head(&ic->copy_to_journal_wait);
3789        init_completion(&ic->crypto_backoff);
3790        atomic64_set(&ic->number_of_mismatches, 0);
3791        ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3792
3793        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3794        if (r) {
3795                ti->error = "Device lookup failed";
3796                goto bad;
3797        }
3798
3799        if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3800                ti->error = "Invalid starting offset";
3801                r = -EINVAL;
3802                goto bad;
3803        }
3804        ic->start = start;
3805
3806        if (strcmp(argv[2], "-")) {
3807                if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3808                        ti->error = "Invalid tag size";
3809                        r = -EINVAL;
3810                        goto bad;
3811                }
3812        }
3813
3814        if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
3815            !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
3816                ic->mode = argv[3][0];
3817        } else {
3818                ti->error = "Invalid mode (expecting J, B, D, R)";
3819                r = -EINVAL;
3820                goto bad;
3821        }
3822
3823        journal_sectors = 0;
3824        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3825        buffer_sectors = DEFAULT_BUFFER_SECTORS;
3826        journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3827        sync_msec = DEFAULT_SYNC_MSEC;
3828        ic->sectors_per_block = 1;
3829
3830        as.argc = argc - DIRECT_ARGUMENTS;
3831        as.argv = argv + DIRECT_ARGUMENTS;
3832        r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
3833        if (r)
3834                goto bad;
3835
3836        while (extra_args--) {
3837                const char *opt_string;
3838                unsigned val;
3839                unsigned long long llval;
3840                opt_string = dm_shift_arg(&as);
3841                if (!opt_string) {
3842                        r = -EINVAL;
3843                        ti->error = "Not enough feature arguments";
3844                        goto bad;
3845                }
3846                if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
3847                        journal_sectors = val ? val : 1;
3848                else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
3849                        interleave_sectors = val;
3850                else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
3851                        buffer_sectors = val;
3852                else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
3853                        journal_watermark = val;
3854                else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3855                        sync_msec = val;
3856                else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3857                        if (ic->meta_dev) {
3858                                dm_put_device(ti, ic->meta_dev);
3859                                ic->meta_dev = NULL;
3860                        }
3861                        r = dm_get_device(ti, strchr(opt_string, ':') + 1,
3862                                          dm_table_get_mode(ti->table), &ic->meta_dev);
3863                        if (r) {
3864                                ti->error = "Device lookup failed";
3865                                goto bad;
3866                        }
3867                } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
3868                        if (val < 1 << SECTOR_SHIFT ||
3869                            val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
3870                            (val & (val - 1))) {
3871                                r = -EINVAL;
3872                                ti->error = "Invalid block_size argument";
3873                                goto bad;
3874                        }
3875                        ic->sectors_per_block = val >> SECTOR_SHIFT;
3876                } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
3877                        log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
3878                } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
3879                        if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
3880                                r = -EINVAL;
3881                                ti->error = "Invalid bitmap_flush_interval argument";
                                    goto bad;
3882                        }
3883                        ic->bitmap_flush_interval = msecs_to_jiffies(val);
3884                } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3885                        r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3886                                            "Invalid internal_hash argument");
3887                        if (r)
3888                                goto bad;
3889                } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3890                        r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3891                                            "Invalid journal_crypt argument");
3892                        if (r)
3893                                goto bad;
3894                } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3895                        r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
3896                                            "Invalid journal_mac argument");
3897                        if (r)
3898                                goto bad;
3899                } else if (!strcmp(opt_string, "recalculate")) {
3900                        ic->recalculate_flag = true;
3901                } else if (!strcmp(opt_string, "allow_discards")) {
3902                        ic->discard = true;
3903                } else if (!strcmp(opt_string, "fix_padding")) {
3904                        ic->fix_padding = true;
3905                } else {
3906                        r = -EINVAL;
3907                        ti->error = "Invalid argument";
3908                        goto bad;
3909                }
3910        }
3911
3912        ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3913        if (!ic->meta_dev)
3914                ic->meta_device_sectors = ic->data_device_sectors;
3915        else
3916                ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3917
3918        if (!journal_sectors) {
3919                journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
3920                                      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
3921        }
3922
3923        if (!buffer_sectors)
3924                buffer_sectors = 1;
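            /*
             * Round the buffer size down to a power of two (__fls gives the
             * floor of log2) and cap it at 1 << (31 - SECTOR_SHIFT) sectors,
             * so that a buffer is at most 2^31 bytes.
             */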
3925        ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
3926
3927        r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3928                    "Invalid internal hash", "Error setting internal hash key");
3929        if (r)
3930                goto bad;
3931
3932        r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
3933                    "Invalid journal mac", "Error setting journal mac key");
3934        if (r)
3935                goto bad;
3936
3937        if (!ic->tag_size) {
3938                if (!ic->internal_hash) {
3939                        ti->error = "Unknown tag size";
3940                        r = -EINVAL;
3941                        goto bad;
3942                }
3943                ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
3944        }
3945        if (ic->tag_size > MAX_TAG_SIZE) {
3946                ti->error = "Too big tag size";
3947                r = -EINVAL;
3948                goto bad;
3949        }
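            /*
             * A power-of-two tag size lets metadata offsets be computed with
             * shifts; log2_tag_size == -1 selects the generic multiply/divide
             * path elsewhere in this file.
             */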
3950        if (!(ic->tag_size & (ic->tag_size - 1)))
3951                ic->log2_tag_size = __ffs(ic->tag_size);
3952        else
3953                ic->log2_tag_size = -1;
3954
3955        if (ic->mode == 'B' && !ic->internal_hash) {
3956                r = -EINVAL;
3957                ti->error = "Bitmap mode can only be used with internal hash";
3958                goto bad;
3959        }
3960
3961        if (ic->discard && !ic->internal_hash) {
3962                r = -EINVAL;
3963                ti->error = "Discard can only be used with internal hash";
3964                goto bad;
3965        }
3966
3967        ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
3968        ic->autocommit_msec = sync_msec;
3969        timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
3970
3971        ic->io = dm_io_client_create();
3972        if (IS_ERR(ic->io)) {
3973                r = PTR_ERR(ic->io);
3974                ic->io = NULL;
3975                ti->error = "Cannot allocate dm io";
3976                goto bad;
3977        }
3978
3979        r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
3980        if (r) {
3981                ti->error = "Cannot allocate mempool";
3982                goto bad;
3983        }
3984
3985        ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
3986                                          WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
3987        if (!ic->metadata_wq) {
3988                ti->error = "Cannot allocate workqueue";
3989                r = -ENOMEM;
3990                goto bad;
3991        }
3992
3993        /*
3994         * If this workqueue were percpu, it would cause bio reordering
3995         * and reduced performance.
3996         */
3997        ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
3998        if (!ic->wait_wq) {
3999                ti->error = "Cannot allocate workqueue";
4000                r = -ENOMEM;
4001                goto bad;
4002        }
4003
4004        ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4005                                          METADATA_WORKQUEUE_MAX_ACTIVE);
4006        if (!ic->offload_wq) {
4007                ti->error = "Cannot allocate workqueue";
4008                r = -ENOMEM;
4009                goto bad;
4010        }
4011
4012        ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4013        if (!ic->commit_wq) {
4014                ti->error = "Cannot allocate workqueue";
4015                r = -ENOMEM;
4016                goto bad;
4017        }
4018        INIT_WORK(&ic->commit_work, integrity_commit);
4019
4020        if (ic->mode == 'J' || ic->mode == 'B') {
4021                ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4022                if (!ic->writer_wq) {
4023                        ti->error = "Cannot allocate workqueue";
4024                        r = -ENOMEM;
4025                        goto bad;
4026                }
4027                INIT_WORK(&ic->writer_work, integrity_writer);
4028        }
4029
4030        ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4031        if (!ic->sb) {
4032                r = -ENOMEM;
4033                ti->error = "Cannot allocate superblock area";
4034                goto bad;
4035        }
4036
4037        r = sync_rw_sb(ic, REQ_OP_READ, 0);
4038        if (r) {
4039                ti->error = "Error reading superblock";
4040                goto bad;
4041        }
4042        should_write_sb = false;
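            /*
             * No valid magic: initialize a fresh superblock.  Outside recovery
             * mode this is done only when the superblock area is still all
             * zeros, so that foreign data is not silently overwritten.
             */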
4043        if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4044                if (ic->mode != 'R') {
4045                        if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4046                                r = -EINVAL;
4047                                ti->error = "The device is not initialized";
4048                                goto bad;
4049                        }
4050                }
4051
4052                r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4053                if (r) {
4054                        ti->error = "Could not initialize superblock";
4055                        goto bad;
4056                }
4057                if (ic->mode != 'R')
4058                        should_write_sb = true;
4059        }
4060
4061        if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
4062                r = -EINVAL;
4063                ti->error = "Unknown version";
4064                goto bad;
4065        }
4066        if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4067                r = -EINVAL;
4068                ti->error = "Tag size doesn't match the information in superblock";
4069                goto bad;
4070        }
4071        if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4072                r = -EINVAL;
4073                ti->error = "Block size doesn't match the information in superblock";
4074                goto bad;
4075        }
4076        if (!le32_to_cpu(ic->sb->journal_sections)) {
4077                r = -EINVAL;
4078                ti->error = "Corrupted superblock, journal_sections is 0";
4079                goto bad;
4080        }
4081        /* make sure that ti->max_io_len doesn't overflow */
4082        if (!ic->meta_dev) {
4083                if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4084                    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4085                        r = -EINVAL;
4086                        ti->error = "Invalid interleave_sectors in the superblock";
4087                        goto bad;
4088                }
4089        } else {
4090                if (ic->sb->log2_interleave_sectors) {
4091                        r = -EINVAL;
4092                        ti->error = "Invalid interleave_sectors in the superblock";
4093                        goto bad;
4094                }
4095        }
4096        if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4097                r = -EINVAL;
4098                ti->error = "Journal mac mismatch";
4099                goto bad;
4100        }
4101
4102        get_provided_data_sectors(ic);
4103        if (!ic->provided_data_sectors) {
4104                r = -EINVAL;
4105                ti->error = "The device is too small";
4106                goto bad;
4107        }
4108
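/*
 * If the computed limits don't fit the device, retry with progressively
 * smaller buffers (down to 2^3 sectors) before giving up; the retry only
 * applies when a separate metadata device is used.
 */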
4109try_smaller_buffer:
4110        r = calculate_device_limits(ic);
4111        if (r) {
4112                if (ic->meta_dev) {
4113                        if (ic->log2_buffer_sectors > 3) {
4114                                ic->log2_buffer_sectors--;
4115                                goto try_smaller_buffer;
4116                        }
4117                }
4118                ti->error = "The device is too small";
4119                goto bad;
4120        }
4121
4122        if (log2_sectors_per_bitmap_bit < 0)
4123                log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4124        if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4125                log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4126
4127        bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4128        if (bits_in_journal > UINT_MAX)
4129                bits_in_journal = UINT_MAX;
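            /*
             * Grow the region covered by one bitmap bit until the number of
             * bits needed for provided_data_sectors fits into the journal
             * area.  E.g. with 2^20 bits available and 2^36 provided sectors
             * the loop stops at log2_sectors_per_bitmap_bit == 16, because
             * 2^36 >> 16 == 2^20 bits.
             */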
4130        while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4131                log2_sectors_per_bitmap_bit++;
4132
4133        log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4134        ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4135        if (should_write_sb) {
4136                ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4137        }
4138        n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4139                                + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4140        ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
4141
4142        if (!ic->meta_dev)
4143                ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4144
4145        if (ti->len > ic->provided_data_sectors) {
4146                r = -EINVAL;
4147                ti->error = "Not enough provided sectors for requested mapping size";
4148                goto bad;
4149        }
4150
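            /*
             * free_sectors_threshold = journal_entries * (100 - journal_watermark)
             * / 100, rounded to the nearest integer by the +50.  E.g. 10000
             * journal entries with the default watermark of 50 give 5000.
             */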
4152        threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4153        threshold += 50;
4154        do_div(threshold, 100);
4155        ic->free_sectors_threshold = threshold;
4156
4157        DEBUG_print("initialized:\n");
4158        DEBUG_print("   integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4159        DEBUG_print("   journal_entry_size %u\n", ic->journal_entry_size);
4160        DEBUG_print("   journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4161        DEBUG_print("   journal_section_entries %u\n", ic->journal_section_entries);
4162        DEBUG_print("   journal_section_sectors %u\n", ic->journal_section_sectors);
4163        DEBUG_print("   journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
4164        DEBUG_print("   journal_entries %u\n", ic->journal_entries);
4165        DEBUG_print("   log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4166        DEBUG_print("   data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
4167        DEBUG_print("   initial_sectors 0x%x\n", ic->initial_sectors);
4168        DEBUG_print("   metadata_run 0x%x\n", ic->metadata_run);
4169        DEBUG_print("   log2_metadata_run %d\n", ic->log2_metadata_run);
4170        DEBUG_print("   provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4171        DEBUG_print("   log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4172        DEBUG_print("   bits_in_journal %llu\n", bits_in_journal);
4173
4174        if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4175                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4176                ic->sb->recalc_sector = cpu_to_le64(0);
4177        }
4178
4179        if (ic->internal_hash) {
4180                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4181                if (!ic->recalc_wq) {
4182                        ti->error = "Cannot allocate workqueue";
4183                        r = -ENOMEM;
4184                        goto bad;
4185                }
4186                INIT_WORK(&ic->recalc_work, integrity_recalc);
4187                ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
4188                if (!ic->recalc_buffer) {
4189                        ti->error = "Cannot allocate buffer for recalculating";
4190                        r = -ENOMEM;
4191                        goto bad;
4192                }
4193                ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
4194                                                 ic->tag_size, GFP_KERNEL);
4195                if (!ic->recalc_tags) {
4196                        ti->error = "Cannot allocate tags for recalculating";
4197                        r = -ENOMEM;
4198                        goto bad;
4199                }
4200        }
4201
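            /*
             * The bufio client reads and writes the tag area, which begins
             * ic->initial_sectors after ic->start on whichever device holds
             * the metadata.
             */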
4202        ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4203                        1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
4204        if (IS_ERR(ic->bufio)) {
4205                r = PTR_ERR(ic->bufio);
4206                ti->error = "Cannot initialize dm-bufio";
4207                ic->bufio = NULL;
4208                goto bad;
4209        }
4210        dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4211
4212        if (ic->mode != 'R') {
4213                r = create_journal(ic, &ti->error);
4214                if (r)
4215                        goto bad;
4216
4218
4219        if (ic->mode == 'B') {
4220                unsigned i;
4221                unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4222
4223                ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4224                if (!ic->recalc_bitmap) {
4225                        r = -ENOMEM;
4226                        goto bad;
4227                }
4228                ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4229                if (!ic->may_write_bitmap) {
4230                        r = -ENOMEM;
4231                        goto bad;
4232                }
4233                ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4234                if (!ic->bbs) {
4235                        r = -ENOMEM;
4236                        goto bad;
4237                }
4238                INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4239                for (i = 0; i < ic->n_bitmap_blocks; i++) {
4240                        struct bitmap_block_status *bbs = &ic->bbs[i];
4241                        unsigned sector, pl_index, pl_offset;
4242
4243                        INIT_WORK(&bbs->work, bitmap_block_work);
4244                        bbs->ic = ic;
4245                        bbs->idx = i;
4246                        bio_list_init(&bbs->bio_queue);
4247                        spin_lock_init(&bbs->bio_queue_lock);
4248
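                            /*
                             * bbs->bitmap points into the ic->journal page list:
                             * pl_index selects the page and pl_offset the byte
                             * offset of this bitmap block within it.
                             */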
4249                        sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4250                        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4251                        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4252
4253                        bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4254                }
4255        }
4256
4257        if (should_write_sb) {
4260                init_journal(ic, 0, ic->journal_sections, 0);
4261                r = dm_integrity_failed(ic);
4262                if (unlikely(r)) {
4263                        ti->error = "Error initializing journal";
4264                        goto bad;
4265                }
4266                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4267                if (r) {
4268                        ti->error = "Error initializing superblock";
4269                        goto bad;
4270                }
4271                ic->just_formatted = true;
4272        }
4273
4274        if (!ic->meta_dev) {
4275                r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4276                if (r)
4277                        goto bad;
4278        }
4279        if (ic->mode == 'B') {
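                    /*
                     * Cap max_io_len so that one bio never spans more data than
                     * a single bitmap block describes: BITMAP_BLOCK_SIZE * 8 bits,
                     * each covering sectors_per_block << log2_blocks_per_bitmap_bit
                     * sectors.
                     */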
4280                unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4281                if (!max_io_len)
4282                        max_io_len = 1U << 31;
4283                DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4284                if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4285                        r = dm_set_target_max_io_len(ti, max_io_len);
4286                        if (r)
4287                                goto bad;
4288                }
4289        }
4290
4291        if (!ic->internal_hash)
4292                dm_integrity_set(ti, ic);
4293
4294        ti->num_flush_bios = 1;
4295        ti->flush_supported = true;
4296        if (ic->discard)
4297                ti->num_discard_bios = 1;
4298
4299        return 0;
4300
4301bad:
4302        dm_integrity_dtr(ti);
4303        return r;
4304}
4305
4306static void dm_integrity_dtr(struct dm_target *ti)
4307{
4308        struct dm_integrity_c *ic = ti->private;
4309
4310        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4311        BUG_ON(!list_empty(&ic->wait_list));
4312
4313        if (ic->metadata_wq)
4314                destroy_workqueue(ic->metadata_wq);
4315        if (ic->wait_wq)
4316                destroy_workqueue(ic->wait_wq);
4317        if (ic->offload_wq)
4318                destroy_workqueue(ic->offload_wq);
4319        if (ic->commit_wq)
4320                destroy_workqueue(ic->commit_wq);
4321        if (ic->writer_wq)
4322                destroy_workqueue(ic->writer_wq);
4323        if (ic->recalc_wq)
4324                destroy_workqueue(ic->recalc_wq);
4325        vfree(ic->recalc_buffer);
4326        kvfree(ic->recalc_tags);
4327        kvfree(ic->bbs);
4328        if (ic->bufio)
4329                dm_bufio_client_destroy(ic->bufio);
4330        mempool_exit(&ic->journal_io_mempool);
4331        if (ic->io)
4332                dm_io_client_destroy(ic->io);
4333        if (ic->dev)
4334                dm_put_device(ti, ic->dev);
4335        if (ic->meta_dev)
4336                dm_put_device(ti, ic->meta_dev);
4337        dm_integrity_free_page_list(ic->journal);
4338        dm_integrity_free_page_list(ic->journal_io);
4339        dm_integrity_free_page_list(ic->journal_xor);
4340        dm_integrity_free_page_list(ic->recalc_bitmap);
4341        dm_integrity_free_page_list(ic->may_write_bitmap);
4342        if (ic->journal_scatterlist)
4343                dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4344        if (ic->journal_io_scatterlist)
4345                dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4346        if (ic->sk_requests) {
4347                unsigned i;
4348
4349                for (i = 0; i < ic->journal_sections; i++) {
4350                        struct skcipher_request *req = ic->sk_requests[i];
4351                        if (req) {
4352                                kfree_sensitive(req->iv);
4353                                skcipher_request_free(req);
4354                        }
4355                }
4356                kvfree(ic->sk_requests);
4357        }
4358        kvfree(ic->journal_tree);
4359        if (ic->sb)
4360                free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4361
4362        if (ic->internal_hash)
4363                crypto_free_shash(ic->internal_hash);
4364        free_alg(&ic->internal_hash_alg);
4365
4366        if (ic->journal_crypt)
4367                crypto_free_skcipher(ic->journal_crypt);
4368        free_alg(&ic->journal_crypt_alg);
4369
4370        if (ic->journal_mac)
4371                crypto_free_shash(ic->journal_mac);
4372        free_alg(&ic->journal_mac_alg);
4373
4374        kfree(ic);
4375}
4376
4377static struct target_type integrity_target = {
4378        .name                   = "integrity",
4379        .version                = {1, 6, 0},
4380        .module                 = THIS_MODULE,
4381        .features               = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4382        .ctr                    = dm_integrity_ctr,
4383        .dtr                    = dm_integrity_dtr,
4384        .map                    = dm_integrity_map,
4385        .postsuspend            = dm_integrity_postsuspend,
4386        .resume                 = dm_integrity_resume,
4387        .status                 = dm_integrity_status,
4388        .iterate_devices        = dm_integrity_iterate_devices,
4389        .io_hints               = dm_integrity_io_hints,
4390};
4391
4392static int __init dm_integrity_init(void)
4393{
4394        int r;
4395
4396        journal_io_cache = kmem_cache_create("integrity_journal_io",
4397                                             sizeof(struct journal_io), 0, 0, NULL);
4398        if (!journal_io_cache) {
4399                DMERR("can't allocate journal io cache");
4400                return -ENOMEM;
4401        }
4402
4403        r = dm_register_target(&integrity_target);
4404
4405        if (r < 0)
4406                DMERR("register failed %d", r);
4407
4408        return r;
4409}
4410
4411static void __exit dm_integrity_exit(void)
4412{
4413        dm_unregister_target(&integrity_target);
4414        kmem_cache_destroy(journal_io_cache);
4415}
4416
4417module_init(dm_integrity_init);
4418module_exit(dm_integrity_exit);
4419
4420MODULE_AUTHOR("Milan Broz");
4421MODULE_AUTHOR("Mikulas Patocka");
4422MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4423MODULE_LICENSE("GPL");
4424