linux/drivers/md/dm-integrity.c
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS      32768
#define DEFAULT_JOURNAL_SIZE_FACTOR     7
#define DEFAULT_SECTORS_PER_BITMAP_BIT  32768
#define DEFAULT_BUFFER_SECTORS          128
#define DEFAULT_JOURNAL_WATERMARK       50
#define DEFAULT_SYNC_MSEC               10000
#define DEFAULT_MAX_JOURNAL_SECTORS     131072
#define MIN_LOG2_INTERLEAVE_SECTORS     3
#define MAX_LOG2_INTERLEAVE_SECTORS     31
#define METADATA_WORKQUEUE_MAX_ACTIVE   16
#define RECALC_SECTORS                  32768
#define RECALC_WRITE_SUPER              16
#define BITMAP_BLOCK_SIZE               4096    /* don't change it */
#define BITMAP_FLUSH_INTERVAL           (10 * HZ)
#define DISCARD_FILLER                  0xf6
#define SALT_SIZE                       16

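/*
 * A rough sense of scale under the defaults above (informal, just
 * arithmetic): data and metadata are interleaved in runs of
 * DEFAULT_INTERLEAVE_SECTORS = 32768 sectors, i.e. 16 MiB of data per
 * area with 512-byte sectors, and in bitmap mode one bit covers
 * DEFAULT_SECTORS_PER_BITMAP_BIT = 32768 sectors, so one dirty bit per
 * 16 MiB of data.
 */
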
/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC                        "integrt"
#define SB_VERSION_1                    1
#define SB_VERSION_2                    2
#define SB_VERSION_3                    3
#define SB_VERSION_4                    4
#define SB_VERSION_5                    5
#define SB_SECTORS                      8
#define MAX_SECTORS_PER_BLOCK           8

struct superblock {
        __u8 magic[8];
        __u8 version;
        __u8 log2_interleave_sectors;
        __le16 integrity_tag_size;
        __le32 journal_sections;
        __le64 provided_data_sectors;   /* userspace uses this value */
        __le32 flags;
        __u8 log2_sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;
        __u8 pad[2];
        __le64 recalc_sector;
        __u8 pad2[8];
        __u8 salt[SALT_SIZE];
};

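/*
 * Informal note: the fields above add up to 64 bytes, so the
 * superblock fits comfortably in the first 512-byte sector of the
 * SB_SECTORS reserved at the start of the device; with
 * SB_FLAG_FIXED_HMAC, sb_mac() below stores the superblock MAC in the
 * unused tail of that same sector.
 */
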
#define SB_FLAG_HAVE_JOURNAL_MAC        0x1
#define SB_FLAG_RECALCULATING           0x2
#define SB_FLAG_DIRTY_BITMAP            0x4
#define SB_FLAG_FIXED_PADDING           0x8
#define SB_FLAG_FIXED_HMAC              0x10

#define JOURNAL_ENTRY_ROUNDUP           8

typedef __le64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR          8

struct journal_entry {
        union {
                struct {
                        __le32 sector_lo;
                        __le32 sector_hi;
                } s;
                __le64 sector;
        } u;
        commit_id_t last_bytes[];
        /* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)               ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)         do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)         do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)            le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)             ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)            do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)         ((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)        do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

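/*
 * Informal note on the macros above: sector_hi values of -1 and -2 are
 * sentinels marking an entry as unused or in-progress; real device
 * sectors never reach those values. On 32-bit builds the low half is
 * stored first and smp_wmb() orders the two stores, so an observer
 * that sees a valid sector_hi also sees the matching sector_lo.
 */
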
#define JOURNAL_BLOCK_SECTORS           8
#define JOURNAL_SECTOR_DATA             ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE                (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
        __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
        __u8 mac[JOURNAL_MAC_PER_SECTOR];
        commit_id_t commit_id;
};

#define MAX_TAG_SIZE                    (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS        8

#define N_COMMIT_IDS                    4

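/*
 * Layout arithmetic, for orientation: a journal sector is 512 bytes =
 * 496 bytes of packed journal entries + an 8-byte slice of the section
 * MAC + an 8-byte commit_id. The commit ids cycle through N_COMMIT_IDS
 * generations as the circular journal wraps around.
 */
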
static unsigned char prev_commit_seq(unsigned char seq)
{
        return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
        return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
        struct rb_node node;
        sector_t sector;
};

struct alg_spec {
        char *alg_string;
        char *key_string;
        __u8 *key;
        unsigned key_size;
};

struct dm_integrity_c {
        struct dm_dev *dev;
        struct dm_dev *meta_dev;
        unsigned tag_size;
        __s8 log2_tag_size;
        sector_t start;
        mempool_t journal_io_mempool;
        struct dm_io_client *io;
        struct dm_bufio_client *bufio;
        struct workqueue_struct *metadata_wq;
        struct superblock *sb;
        unsigned journal_pages;
        unsigned n_bitmap_blocks;

        struct page_list *journal;
        struct page_list *journal_io;
        struct page_list *journal_xor;
        struct page_list *recalc_bitmap;
        struct page_list *may_write_bitmap;
        struct bitmap_block_status *bbs;
        unsigned bitmap_flush_interval;
        int synchronous_mode;
        struct bio_list synchronous_bios;
        struct delayed_work bitmap_flush_work;

        struct crypto_skcipher *journal_crypt;
        struct scatterlist **journal_scatterlist;
        struct scatterlist **journal_io_scatterlist;
        struct skcipher_request **sk_requests;

        struct crypto_shash *journal_mac;

        struct journal_node *journal_tree;
        struct rb_root journal_tree_root;

        sector_t provided_data_sectors;

        unsigned short journal_entry_size;
        unsigned char journal_entries_per_sector;
        unsigned char journal_section_entries;
        unsigned short journal_section_sectors;
        unsigned journal_sections;
        unsigned journal_entries;
        sector_t data_device_sectors;
        sector_t meta_device_sectors;
        unsigned initial_sectors;
        unsigned metadata_run;
        __s8 log2_metadata_run;
        __u8 log2_buffer_sectors;
        __u8 sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;

        unsigned char mode;

        int failed;

        struct crypto_shash *internal_hash;

        struct dm_target *ti;

        /* these variables are locked with endio_wait.lock */
        struct rb_root in_progress;
        struct list_head wait_list;
        wait_queue_head_t endio_wait;
        struct workqueue_struct *wait_wq;
        struct workqueue_struct *offload_wq;

        unsigned char commit_seq;
        commit_id_t commit_ids[N_COMMIT_IDS];

        unsigned committed_section;
        unsigned n_committed_sections;

        unsigned uncommitted_section;
        unsigned n_uncommitted_sections;

        unsigned free_section;
        unsigned char free_section_entry;
        unsigned free_sectors;

        unsigned free_sectors_threshold;

        struct workqueue_struct *commit_wq;
        struct work_struct commit_work;

        struct workqueue_struct *writer_wq;
        struct work_struct writer_work;

        struct workqueue_struct *recalc_wq;
        struct work_struct recalc_work;
        u8 *recalc_buffer;
        u8 *recalc_tags;

        struct bio_list flush_bio_list;

        unsigned long autocommit_jiffies;
        struct timer_list autocommit_timer;
        unsigned autocommit_msec;

        wait_queue_head_t copy_to_journal_wait;

        struct completion crypto_backoff;

        bool journal_uptodate;
        bool just_formatted;
        bool recalculate_flag;
        bool reset_recalculate_flag;
        bool discard;
        bool fix_padding;
        bool fix_hmac;
        bool legacy_recalculate;

        struct alg_spec internal_hash_alg;
        struct alg_spec journal_crypt_alg;
        struct alg_spec journal_mac_alg;

        atomic64_t number_of_mismatches;

        struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
        sector_t logical_sector;
        sector_t n_sectors;
        bool waiting;
        union {
                struct rb_node node;
                struct {
                        struct task_struct *task;
                        struct list_head wait_entry;
                };
        };
};

struct dm_integrity_io {
        struct work_struct work;

        struct dm_integrity_c *ic;
        enum req_opf op;
        bool fua;

        struct dm_integrity_range range;

        sector_t metadata_block;
        unsigned metadata_offset;

        atomic_t in_flight;
        blk_status_t bi_status;

        struct completion *completion;

        struct dm_bio_details bio_details;
};

struct journal_completion {
        struct dm_integrity_c *ic;
        atomic_t in_flight;
        struct completion comp;
};

struct journal_io {
        struct dm_integrity_range range;
        struct journal_completion *comp;
};

struct bitmap_block_status {
        struct work_struct work;
        struct dm_integrity_c *ic;
        unsigned idx;
        unsigned long *bitmap;
        struct bio_list bio_queue;
        spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL      32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)     printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
        va_list args;
        va_start(args, msg);
        vprintk(msg, args);
        va_end(args);
        if (len)
                pr_cont(":");
        while (len) {
                pr_cont(" %02x", *bytes);
                bytes++;
                len--;
        }
        pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)       __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)                     do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)       do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile; protection is performed by the layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
        .name                   = "DM-DIF-EXT-TAG",
        .generate_fn            = NULL,
        .verify_fn              = NULL,
        .prepare_fn             = dm_integrity_prepare,
        .complete_fn            = dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
        if (err == -EILSEQ)
                atomic64_inc(&ic->number_of_mismatches);
        if (!cmpxchg(&ic->failed, 0, err))
                DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
        return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
        if (ic->legacy_recalculate)
                return false;
        if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
            ic->internal_hash_alg.key || ic->journal_mac_alg.key :
            ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
                return true;
        return false;
}

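/*
 * Reading the condition above, informally: without SB_FLAG_FIXED_HMAC,
 * recalculation is refused if either the internal hash or the journal
 * MAC is keyed; with the fixed-HMAC format it is refused only when the
 * internal hash is keyed but the journal MAC is not. The
 * "legacy_recalculate" option overrides the check.
 */
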
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
                                          unsigned j, unsigned char seq)
{
        /*
         * Xor the number with the section and sector, so that if a piece of
         * the journal is written at the wrong place, it is detected.
         */
        return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

 417
 418static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
 419                                sector_t *area, sector_t *offset)
 420{
 421        if (!ic->meta_dev) {
 422                __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
 423                *area = data_sector >> log2_interleave_sectors;
 424                *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
 425        } else {
 426                *area = 0;
 427                *offset = data_sector;
 428        }
 429}
 430
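/*
 * Worked example (interleaved mode, log2_interleave_sectors == 15):
 * data_sector 100000 -> area = 100000 >> 15 = 3,
 * offset = 100000 & 32767 = 1696. With a separate metadata device
 * there is a single area and the offset is the sector itself.
 */
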
#define sector_to_block(ic, n)                                          \
do {                                                                    \
        BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));          \
        (n) >>= (ic)->sb->log2_sectors_per_block;                       \
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
                                            sector_t offset, unsigned *metadata_offset)
{
        __u64 ms;
        unsigned mo;

        ms = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                ms += area << ic->log2_metadata_run;
        else
                ms += area * ic->metadata_run;
        ms >>= ic->log2_buffer_sectors;

        sector_to_block(ic, offset);

        if (likely(ic->log2_tag_size >= 0)) {
                ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
                mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        } else {
                ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
                mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        }
        *metadata_offset = mo;
        return ms;
}

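/*
 * Unit note (informal): the value returned above is a dm-bufio block
 * number (buffer blocks are 512 << log2_buffer_sectors bytes) and
 * *metadata_offset is the byte offset of the block's tag within that
 * buffer block; "offset" has already been converted from sectors to
 * data blocks by sector_to_block().
 */
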
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
        sector_t result;

        if (ic->meta_dev)
                return offset;

        result = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                result += (area + 1) << ic->log2_metadata_run;
        else
                result += (area + 1) * ic->metadata_run;

        result += (sector_t)ic->initial_sectors + offset;
        result += ic->start;

        return result;
}

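/*
 * Resulting on-disk layout in interleaved mode (sketch):
 *   ic->start | superblock + journal (initial_sectors)
 *             | metadata run 0 | data area 0
 *             | metadata run 1 | data area 1 | ...
 * which is why the metadata run is counted (area + 1) times above but
 * the interleave only "area" times.
 */
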
static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
        if (unlikely(*sec_ptr >= ic->journal_sections))
                *sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
        if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
                ic->sb->version = SB_VERSION_5;
        else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
                ic->sb->version = SB_VERSION_4;
        else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
                ic->sb->version = SB_VERSION_3;
        else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
                ic->sb->version = SB_VERSION_2;
        else
                ic->sb->version = SB_VERSION_1;
}

static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
        SHASH_DESC_ON_STACK(desc, ic->journal_mac);
        int r;
        unsigned size = crypto_shash_digestsize(ic->journal_mac);

        if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
                dm_integrity_io_error(ic, "digest is too long", -EINVAL);
                return -EINVAL;
        }

        desc->tfm = ic->journal_mac;

        r = crypto_shash_init(desc);
        if (unlikely(r < 0)) {
                dm_integrity_io_error(ic, "crypto_shash_init", r);
                return r;
        }

        r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size);
        if (unlikely(r < 0)) {
                dm_integrity_io_error(ic, "crypto_shash_update", r);
                return r;
        }

        if (likely(wr)) {
                r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        return r;
                }
        } else {
                __u8 result[HASH_MAX_DIGESTSIZE];
                r = crypto_shash_final(desc, result);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        return r;
                }
                if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
                        dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
                        return -EILSEQ;
                }
        }

        return 0;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        int r;

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_KMEM;
        io_req.mem.ptr.addr = ic->sb;
        io_req.notify.fn = NULL;
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start;
        io_loc.count = SB_SECTORS;

        if (op == REQ_OP_WRITE) {
                sb_set_version(ic);
                if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
                        r = sb_mac(ic, true);
                        if (unlikely(r))
                                return r;
                }
        }

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r))
                return r;

        if (op == REQ_OP_READ) {
                if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
                        r = sb_mac(ic, false);
                        if (unlikely(r))
                                return r;
                }
        }

        return 0;
}

#define BITMAP_OP_TEST_ALL_SET          0
#define BITMAP_OP_TEST_ALL_CLEAR        1
#define BITMAP_OP_SET                   2
#define BITMAP_OP_CLEAR                 3

static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
                            sector_t sector, sector_t n_sectors, int mode)
{
        unsigned long bit, end_bit, this_end_bit, page, end_page;
        unsigned long *data;

        if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
                DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
                        sector,
                        n_sectors,
                        ic->sb->log2_sectors_per_block,
                        ic->log2_blocks_per_bitmap_bit,
                        mode);
                BUG();
        }

        if (unlikely(!n_sectors))
                return true;

        bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        end_bit = (sector + n_sectors - 1) >>
                (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

        page = bit / (PAGE_SIZE * 8);
        bit %= PAGE_SIZE * 8;

        end_page = end_bit / (PAGE_SIZE * 8);
        end_bit %= PAGE_SIZE * 8;

repeat:
        if (page < end_page) {
                this_end_bit = PAGE_SIZE * 8 - 1;
        } else {
                this_end_bit = end_bit;
        }

        data = lowmem_page_address(bitmap[page].page);

        if (mode == BITMAP_OP_TEST_ALL_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != -1)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (!test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != 0)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = -1;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __set_bit(bit, data);
                        bit++;
                }
        } else if (mode == BITMAP_OP_CLEAR) {
                if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
                        clear_page(data);
                else while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = 0;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __clear_bit(bit, data);
                        bit++;
                }
        } else {
                BUG();
        }

        if (unlikely(page < end_page)) {
                bit = 0;
                page++;
                goto repeat;
        }

        return true;
}

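/*
 * Note: the four loops above share a fast path - whenever the current
 * bit is long-aligned and at least BITS_PER_LONG bits remain before
 * this_end_bit, a whole unsigned long is tested or assigned at once
 * instead of bit by bit.
 */
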
static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
        unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
        unsigned i;

        for (i = 0; i < n_bitmap_pages; i++) {
                unsigned long *dst_data = lowmem_page_address(dst[i].page);
                unsigned long *src_data = lowmem_page_address(src[i].page);
                copy_page(dst_data, src_data);
        }
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
        unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

        BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
        return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                                 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
        unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

        if (unlikely(section >= ic->journal_sections) ||
            unlikely(offset >= limit)) {
                DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
                       function, section, offset, ic->journal_sections, limit);
                BUG();
        }
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                               unsigned *pl_index, unsigned *pl_offset)
{
        unsigned sector;

        access_journal_check(ic, section, offset, false, "page_list_location");

        sector = section * ic->journal_section_sectors + offset;

        *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

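/*
 * Example with 4K pages: journal sector 13 maps to pl_index = 13 >> 3 = 1
 * and pl_offset = (13 << 9) & 4095 = 2560, i.e. the second page of the
 * list, 2560 bytes in.
 */
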
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
                                               unsigned section, unsigned offset, unsigned *n_sectors)
{
        unsigned pl_index, pl_offset;
        char *va;

        page_list_location(ic, section, offset, &pl_index, &pl_offset);

        if (n_sectors)
                *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

        va = lowmem_page_address(pl[pl_index].page);

        return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
        return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        unsigned rel_sector, offset;
        struct journal_sector *js;

        access_journal_check(ic, section, n, true, "access_journal_entry");

        rel_sector = n % JOURNAL_BLOCK_SECTORS;
        offset = n / JOURNAL_BLOCK_SECTORS;

        js = access_journal(ic, section, rel_sector);
        return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

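/*
 * Entry n of a section therefore lives in journal sector
 * n % JOURNAL_BLOCK_SECTORS at slot n / JOURNAL_BLOCK_SECTORS:
 * consecutive entries are striped round-robin across the section's
 * eight entry sectors.
 */
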
static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        n <<= ic->sb->log2_sectors_per_block;

        n += JOURNAL_BLOCK_SECTORS;

        access_journal_check(ic, section, n, false, "access_journal_data");

        return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
        SHASH_DESC_ON_STACK(desc, ic->journal_mac);
        int r;
        unsigned j, size;

        desc->tfm = ic->journal_mac;

        r = crypto_shash_init(desc);
        if (unlikely(r < 0)) {
                dm_integrity_io_error(ic, "crypto_shash_init", r);
                goto err;
        }

        if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
                __le64 section_le;

                r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_update", r);
                        goto err;
                }

                section_le = cpu_to_le64(section);
                r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof section_le);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_update", r);
                        goto err;
                }
        }

        for (j = 0; j < ic->journal_section_entries; j++) {
                struct journal_entry *je = access_journal_entry(ic, section, j);
                r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_update", r);
                        goto err;
                }
        }

        size = crypto_shash_digestsize(ic->journal_mac);

        if (likely(size <= JOURNAL_MAC_SIZE)) {
                r = crypto_shash_final(desc, result);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memset(result + size, 0, JOURNAL_MAC_SIZE - size);
        } else {
                __u8 digest[HASH_MAX_DIGESTSIZE];

                if (WARN_ON(size > sizeof(digest))) {
                        dm_integrity_io_error(ic, "digest_size", -EINVAL);
                        goto err;
                }
                r = crypto_shash_final(desc, digest);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memcpy(result, digest, JOURNAL_MAC_SIZE);
        }

        return;
err:
        memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
        __u8 result[JOURNAL_MAC_SIZE];
        unsigned j;

        if (!ic->journal_mac)
                return;

        section_mac(ic, section, result);

        for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
                struct journal_sector *js = access_journal(ic, section, j);

                if (likely(wr))
                        memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
                else {
                        if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
                                dm_integrity_io_error(ic, "journal mac", -EILSEQ);
                }
        }
}

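/*
 * The section MAC computed above is distributed in
 * JOURNAL_MAC_PER_SECTOR-byte slices across the mac[] fields of the
 * section's eight entry sectors, so each journal sector carries part
 * of the authentication tag for the whole section.
 */
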
static void complete_journal_op(void *context)
{
        struct journal_completion *comp = context;
        BUG_ON(!atomic_read(&comp->in_flight));
        if (likely(atomic_dec_and_test(&comp->in_flight)))
                complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                        unsigned n_sections, struct journal_completion *comp)
{
        struct async_submit_ctl submit;
        size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
        unsigned pl_index, pl_offset, section_index;
        struct page_list *source_pl, *target_pl;

        if (likely(encrypt)) {
                source_pl = ic->journal;
                target_pl = ic->journal_io;
        } else {
                source_pl = ic->journal_io;
                target_pl = ic->journal;
        }

        page_list_location(ic, section, 0, &pl_index, &pl_offset);

        atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

        section_index = pl_index;

        do {
                size_t this_step;
                struct page *src_pages[2];
                struct page *dst_page;

                while (unlikely(pl_index == section_index)) {
                        unsigned dummy;
                        if (likely(encrypt))
                                rw_section_mac(ic, section, true);
                        section++;
                        n_sections--;
                        if (!n_sections)
                                break;
                        page_list_location(ic, section, 0, &section_index, &dummy);
                }

                this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
                dst_page = target_pl[pl_index].page;
                src_pages[0] = source_pl[pl_index].page;
                src_pages[1] = ic->journal_xor[pl_index].page;

                async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

                pl_index++;
                pl_offset = 0;
                n_bytes -= this_step;
        } while (n_bytes);

        BUG_ON(n_sections);

        async_tx_issue_pending_all();
}

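/*
 * Hedged note: in this XOR path the pages in ic->journal_xor appear to
 * act as a precomputed keystream - async_xor() combines each journal
 * page with the corresponding keystream page, so "encrypt" and
 * "decrypt" are the same operation run in opposite directions between
 * ic->journal and ic->journal_io.
 */
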
static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
        struct journal_completion *comp = req->data;
        if (unlikely(err)) {
                if (likely(err == -EINPROGRESS)) {
                        complete(&comp->ic->crypto_backoff);
                        return;
                }
                dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
        }
        complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
        int r;
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      complete_journal_encrypt, comp);
        if (likely(encrypt))
                r = crypto_skcipher_encrypt(req);
        else
                r = crypto_skcipher_decrypt(req);
        if (likely(!r))
                return false;
        if (likely(r == -EINPROGRESS))
                return true;
        if (likely(r == -EBUSY)) {
                wait_for_completion(&comp->ic->crypto_backoff);
                reinit_completion(&comp->ic->crypto_backoff);
                return true;
        }
        dm_integrity_io_error(comp->ic, "encrypt", r);
        return false;
}

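/*
 * Return value protocol of do_crypt(), as used below: false means the
 * request completed synchronously; true means completion will arrive
 * later through complete_journal_encrypt(), so the caller takes an
 * extra in_flight reference. On -EBUSY the request was backlogged: the
 * callback first fires with -EINPROGRESS, signalling crypto_backoff,
 * after which do_crypt() returns true and the real completion follows.
 */
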
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                          unsigned n_sections, struct journal_completion *comp)
{
        struct scatterlist **source_sg;
        struct scatterlist **target_sg;

        atomic_add(2, &comp->in_flight);

        if (likely(encrypt)) {
                source_sg = ic->journal_scatterlist;
                target_sg = ic->journal_io_scatterlist;
        } else {
                source_sg = ic->journal_io_scatterlist;
                target_sg = ic->journal_scatterlist;
        }

        do {
                struct skcipher_request *req;
                unsigned ivsize;
                char *iv;

                if (likely(encrypt))
                        rw_section_mac(ic, section, true);

                req = ic->sk_requests[section];
                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
                iv = req->iv;

                memcpy(iv, iv + ivsize, ivsize);

                req->src = source_sg[section];
                req->dst = target_sg[section];

                if (unlikely(do_crypt(encrypt, req, comp)))
                        atomic_inc(&comp->in_flight);

                section++;
                n_sections--;
        } while (n_sections);

        atomic_dec(&comp->in_flight);
        complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                            unsigned n_sections, struct journal_completion *comp)
{
        if (ic->journal_xor)
                return xor_journal(ic, encrypt, section, n_sections, comp);
        else
                return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
        struct journal_completion *comp = context;
        if (unlikely(error != 0))
                dm_integrity_io_error(comp->ic, "writing journal", -EIO);
        complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
                               unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        unsigned pl_index, pl_offset;
        int r;

        if (unlikely(dm_integrity_failed(ic))) {
                if (comp)
                        complete_journal_io(-1UL, comp);
                return;
        }

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_PAGE_LIST;
        if (ic->journal_io)
                io_req.mem.ptr.pl = &ic->journal_io[pl_index];
        else
                io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        if (likely(comp != NULL)) {
                io_req.notify.fn = complete_journal_io;
                io_req.notify.context = comp;
        } else {
                io_req.notify.fn = NULL;
        }
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start + SB_SECTORS + sector;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
                if (comp) {
                        WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                        complete_journal_io(-1UL, comp);
                }
        }
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
                       unsigned n_sections, struct journal_completion *comp)
{
        unsigned sector, n_sectors;

        sector = section * ic->journal_section_sectors;
        n_sectors = n_sections * ic->journal_section_sectors;

        rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
        struct journal_completion io_comp;
        struct journal_completion crypt_comp_1;
        struct journal_completion crypt_comp_2;
        unsigned i;

        io_comp.ic = ic;
        init_completion(&io_comp.comp);

        if (commit_start + commit_sections <= ic->journal_sections) {
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
                        wait_for_completion_io(&crypt_comp_1.comp);
                } else {
                        for (i = 0; i < commit_sections; i++)
                                rw_section_mac(ic, commit_start + i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
                           commit_sections, &io_comp);
        } else {
                unsigned to_end;
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
                to_end = ic->journal_sections - commit_start;
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
                        if (try_wait_for_completion(&crypt_comp_1.comp)) {
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                reinit_completion(&crypt_comp_1.comp);
                                crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
                                wait_for_completion_io(&crypt_comp_1.comp);
                        } else {
                                crypt_comp_2.ic = ic;
                                init_completion(&crypt_comp_2.comp);
                                crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
                                wait_for_completion_io(&crypt_comp_1.comp);
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                wait_for_completion_io(&crypt_comp_2.comp);
                        }
                } else {
                        for (i = 0; i < to_end; i++)
                                rw_section_mac(ic, commit_start + i, true);
                        rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                        for (i = 0; i < commit_sections - to_end; i++)
                                rw_section_mac(ic, i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
        }

        wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                              unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        int r;
        unsigned sector, pl_index, pl_offset;

        BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

        if (unlikely(dm_integrity_failed(ic))) {
                fn(-1UL, data);
                return;
        }

        sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = REQ_OP_WRITE;
        io_req.bi_op_flags = 0;
        io_req.mem.type = DM_IO_PAGE_LIST;
        io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        io_req.notify.fn = fn;
        io_req.notify.context = data;
        io_req.client = ic->io;
        io_loc.bdev = ic->dev->bdev;
        io_loc.sector = target;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                fn(-1UL, data);
        }
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
        return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
               range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

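/*
 * Ranges are half-open intervals [logical_sector, logical_sector +
 * n_sectors); the test above is the standard "A starts before B ends
 * and A ends after B starts" overlap check.
 */
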
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
        struct rb_node **n = &ic->in_progress.rb_node;
        struct rb_node *parent;

        BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

        if (likely(check_waiting)) {
                struct dm_integrity_range *range;
                list_for_each_entry(range, &ic->wait_list, wait_entry) {
                        if (unlikely(ranges_overlap(range, new_range)))
                                return false;
                }
        }

        parent = NULL;

        while (*n) {
                struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

                parent = *n;
                if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
                        n = &range->node.rb_left;
                } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
                        n = &range->node.rb_right;
                } else {
                        return false;
                }
        }

        rb_link_node(&new_range->node, parent, n);
        rb_insert_color(&new_range->node, &ic->in_progress);

        return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        rb_erase(&range->node, &ic->in_progress);
        while (unlikely(!list_empty(&ic->wait_list))) {
                struct dm_integrity_range *last_range =
                        list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
                struct task_struct *last_range_task;
                last_range_task = last_range->task;
                list_del(&last_range->wait_entry);
                if (!add_new_range(ic, last_range, false)) {
                        last_range->task = last_range_task;
                        list_add(&last_range->wait_entry, &ic->wait_list);
                        break;
                }
                last_range->waiting = false;
                wake_up_process(last_range_task);
        }
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->endio_wait.lock, flags);
        remove_range_unlocked(ic, range);
        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        new_range->waiting = true;
        list_add_tail(&new_range->wait_entry, &ic->wait_list);
        new_range->task = current;
        do {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&ic->endio_wait.lock);
                io_schedule();
                spin_lock_irq(&ic->endio_wait.lock);
        } while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        if (unlikely(!add_new_range(ic, new_range, true)))
                wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
        RB_CLEAR_NODE(&node->node);
        node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
        struct rb_node **link;
        struct rb_node *parent;

        node->sector = sector;
        BUG_ON(!RB_EMPTY_NODE(&node->node));

        link = &ic->journal_tree_root.rb_node;
        parent = NULL;

        while (*link) {
                struct journal_node *j;
                parent = *link;
                j = container_of(parent, struct journal_node, node);
                if (sector < j->sector)
                        link = &j->node.rb_left;
                else
                        link = &j->node.rb_right;
        }

        rb_link_node(&node->node, parent, link);
        rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        BUG_ON(RB_EMPTY_NODE(&node->node));
        rb_erase(&node->node, &ic->journal_tree_root);
        init_journal_node(node);
}

#define NOT_FOUND       (-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
        struct rb_node *n = ic->journal_tree_root.rb_node;
        unsigned found = NOT_FOUND;
        *next_sector = (sector_t)-1;
        while (n) {
                struct journal_node *j = container_of(n, struct journal_node, node);
                if (sector == j->sector) {
                        found = j - ic->journal_tree;
                }
                if (sector < j->sector) {
                        *next_sector = j->sector;
                        n = j->node.rb_left;
                } else {
                        n = j->node.rb_right;
                }
        }

        return found;
}

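/*
 * Informally: the walk above returns the index (within ic->journal_tree)
 * of an entry whose sector matches, or NOT_FOUND, and independently
 * sets *next_sector to the smallest journal sector strictly greater
 * than "sector" (or -1 if there is none).
 */
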
static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
        struct journal_node *node, *next_node;
        struct rb_node *next;

        if (unlikely(pos >= ic->journal_entries))
                return false;
        node = &ic->journal_tree[pos];
        if (unlikely(RB_EMPTY_NODE(&node->node)))
                return false;
        if (unlikely(node->sector != sector))
                return false;

        next = rb_next(&node->node);
        if (unlikely(!next))
                return true;

        next_node = container_of(next, struct journal_node, node);
        return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        struct rb_node *next;
        struct journal_node *next_node;
        unsigned next_section;

        BUG_ON(RB_EMPTY_NODE(&node->node));

        next = rb_next(&node->node);
        if (unlikely(!next))
                return false;

        next_node = container_of(next, struct journal_node, node);

        if (next_node->sector != node->sector)
                return false;

        next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
        if (next_section >= ic->committed_section &&
            next_section < ic->committed_section + ic->n_committed_sections)
                return true;
        if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
                return true;

        return false;
}

#define TAG_READ        0
#define TAG_WRITE       1
#define TAG_CMP         2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
                               unsigned *metadata_offset, unsigned total_size, int op)
{
#define MAY_BE_FILLER           1
#define MAY_BE_HASH             2
        unsigned hash_offset = 0;
        unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

        do {
                unsigned char *data, *dp;
                struct dm_buffer *b;
                unsigned to_copy;
                int r;

                r = dm_integrity_failed(ic);
                if (unlikely(r))
                        return r;

                data = dm_bufio_read(ic->bufio, *metadata_block, &b);
                if (IS_ERR(data))
                        return PTR_ERR(data);

                to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
                dp = data + *metadata_offset;
                if (op == TAG_READ) {
                        memcpy(tag, dp, to_copy);
                } else if (op == TAG_WRITE) {
                        if (memcmp(dp, tag, to_copy)) {
                                memcpy(dp, tag, to_copy);
                                dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
                        }
                } else {
                        /* e.g.: op == TAG_CMP */

                        if (likely(is_power_of_2(ic->tag_size))) {
                                if (unlikely(memcmp(dp, tag, to_copy)))
                                        if (unlikely(!ic->discard) ||
                                            unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL))
                                                goto thorough_test;
                        } else {
                                unsigned i, ts;
thorough_test:
                                ts = total_size;

                                for (i = 0; i < to_copy; i++, ts--) {
                                        if (unlikely(dp[i] != tag[i]))
                                                may_be &= ~MAY_BE_HASH;
                                        if (likely(dp[i] != DISCARD_FILLER))
                                                may_be &= ~MAY_BE_FILLER;
                                        hash_offset++;
                                        if (unlikely(hash_offset == ic->tag_size)) {
                                                if (unlikely(!may_be)) {
                                                        dm_bufio_release(b);
                                                        return ts;
                                                }
                                                hash_offset = 0;
                                                may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
                                        }
                                }
                        }
                }
                dm_bufio_release(b);

                tag += to_copy;
                *metadata_offset += to_copy;
                if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
                        (*metadata_block)++;
                        *metadata_offset = 0;
                }

                if (unlikely(!is_power_of_2(ic->tag_size))) {
                        hash_offset = (hash_offset + to_copy) % ic->tag_size;
                }

                total_size -= to_copy;
        } while (unlikely(total_size));

        return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}

1488struct flush_request {
1489        struct dm_io_request io_req;
1490        struct dm_io_region io_reg;
1491        struct dm_integrity_c *ic;
1492        struct completion comp;
1493};
1494
1495static void flush_notify(unsigned long error, void *fr_)
1496{
1497        struct flush_request *fr = fr_;
1498        if (unlikely(error != 0))
1499                dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
1500        complete(&fr->comp);
1501}
1502
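/*
 * Write out the dirty bufio (metadata) buffers. If the tags live on a
 * separate metadata device, the bufio flush covers only that device, so
 * when flush_data is set an empty-payload PREFLUSH is also sent to the
 * data device, in parallel with the buffer write-out.
 */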
1503static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
1504{
1505        int r;
1506
1507        struct flush_request fr;
1508
1509        if (!ic->meta_dev)
1510                flush_data = false;
1511        if (flush_data) {
1512                fr.io_req.bi_op = REQ_OP_WRITE;
1513                fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC;
1514                fr.io_req.mem.type = DM_IO_KMEM;
1515                fr.io_req.mem.ptr.addr = NULL;
1516                fr.io_req.notify.fn = flush_notify;
1517                fr.io_req.notify.context = &fr;
1518                fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
1519                fr.io_reg.bdev = ic->dev->bdev;
1520                fr.io_reg.sector = 0;
1521                fr.io_reg.count = 0;
1522                fr.ic = ic;
1523                init_completion(&fr.comp);
1524                r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
1525                BUG_ON(r);
1526        }
1527
1528        r = dm_bufio_write_dirty_buffers(ic->bufio);
1529        if (unlikely(r))
1530                dm_integrity_io_error(ic, "writing tags", r);
1531
1532        if (flush_data)
1533                wait_for_completion(&fr.comp);
1534}
1535
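/*
 * Sleep on ic->endio_wait; must be called with ic->endio_wait.lock held.
 * The lock is dropped while sleeping and re-acquired before returning.
 */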
1536static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1537{
1538        DECLARE_WAITQUEUE(wait, current);
1539        __add_wait_queue(&ic->endio_wait, &wait);
1540        __set_current_state(TASK_UNINTERRUPTIBLE);
1541        spin_unlock_irq(&ic->endio_wait.lock);
1542        io_schedule();
1543        spin_lock_irq(&ic->endio_wait.lock);
1544        __remove_wait_queue(&ic->endio_wait, &wait);
1545}
1546
1547static void autocommit_fn(struct timer_list *t)
1548{
1549        struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1550
1551        if (likely(!dm_integrity_failed(ic)))
1552                queue_work(ic->commit_wq, &ic->commit_work);
1553}
1554
1555static void schedule_autocommit(struct dm_integrity_c *ic)
1556{
1557        if (!timer_pending(&ic->autocommit_timer))
1558                mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1559}
1560
1561static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1562{
1563        struct bio *bio;
1564        unsigned long flags;
1565
1566        spin_lock_irqsave(&ic->endio_wait.lock, flags);
1567        bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1568        bio_list_add(&ic->flush_bio_list, bio);
1569        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1570
1571        queue_work(ic->commit_wq, &ic->commit_work);
1572}
1573
1574static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1575{
1576        int r = dm_integrity_failed(ic);
1577        if (unlikely(r) && !bio->bi_status)
1578                bio->bi_status = errno_to_blk_status(r);
1579        if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1580                unsigned long flags;
1581                spin_lock_irqsave(&ic->endio_wait.lock, flags);
1582                bio_list_add(&ic->synchronous_bios, bio);
1583                queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1584                spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1585                return;
1586        }
1587        bio_endio(bio);
1588}
1589
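/*
 * Finish a bio. REQ_FUA is emulated: dm_integrity_map() strips the FUA
 * flag, so a successful FUA write is instead queued on flush_bio_list and
 * only completed after the commit work has flushed the disk cache.
 */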
1590static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1591{
1592        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1593
1594        if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1595                submit_flush_bio(ic, dio);
1596        else
1597                do_endio(ic, bio);
1598}
1599
1600static void dec_in_flight(struct dm_integrity_io *dio)
1601{
1602        if (atomic_dec_and_test(&dio->in_flight)) {
1603                struct dm_integrity_c *ic = dio->ic;
1604                struct bio *bio;
1605
1606                remove_range(ic, &dio->range);
1607
1608                if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
1609                        schedule_autocommit(ic);
1610
1611                bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1612
1613                if (unlikely(dio->bi_status) && !bio->bi_status)
1614                        bio->bi_status = dio->bi_status;
1615                if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1616                        dio->range.logical_sector += dio->range.n_sectors;
1617                        bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1618                        INIT_WORK(&dio->work, integrity_bio_wait);
1619                        queue_work(ic->offload_wq, &dio->work);
1620                        return;
1621                }
1622                do_endio_flush(ic, dio);
1623        }
1624}
1625
1626static void integrity_end_io(struct bio *bio)
1627{
1628        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1629
1630        dm_bio_restore(&dio->bio_details, bio);
1631        if (bio->bi_integrity)
1632                bio->bi_opf |= REQ_INTEGRITY;
1633
1634        if (dio->completion)
1635                complete(dio->completion);
1636
1637        dec_in_flight(dio);
1638}
1639
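/*
 * Compute the integrity tag for one block: a hash or HMAC over the
 * superblock salt (only with SB_FLAG_FIXED_HMAC), the little-endian
 * logical sector number and the block data. A digest shorter than the
 * configured tag size is zero-padded; if the hash unexpectedly fails,
 * the result is randomized so that it cannot accidentally validate.
 */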
1640static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1641                                      const char *data, char *result)
1642{
1643        __le64 sector_le = cpu_to_le64(sector);
1644        SHASH_DESC_ON_STACK(req, ic->internal_hash);
1645        int r;
1646        unsigned digest_size;
1647
1648        req->tfm = ic->internal_hash;
1649
1650        r = crypto_shash_init(req);
1651        if (unlikely(r < 0)) {
1652                dm_integrity_io_error(ic, "crypto_shash_init", r);
1653                goto failed;
1654        }
1655
1656        if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
1657                r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
1658                if (unlikely(r < 0)) {
1659                        dm_integrity_io_error(ic, "crypto_shash_update", r);
1660                        goto failed;
1661                }
1662        }
1663
1664        r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1665        if (unlikely(r < 0)) {
1666                dm_integrity_io_error(ic, "crypto_shash_update", r);
1667                goto failed;
1668        }
1669
1670        r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1671        if (unlikely(r < 0)) {
1672                dm_integrity_io_error(ic, "crypto_shash_update", r);
1673                goto failed;
1674        }
1675
1676        r = crypto_shash_final(req, result);
1677        if (unlikely(r < 0)) {
1678                dm_integrity_io_error(ic, "crypto_shash_final", r);
1679                goto failed;
1680        }
1681
1682        digest_size = crypto_shash_digestsize(ic->internal_hash);
1683        if (unlikely(digest_size < ic->tag_size))
1684                memset(result + digest_size, 0, ic->tag_size - digest_size);
1685
1686        return;
1687
1688failed:
1689        /* this shouldn't happen anyway, as the hash functions have no reason to fail */
1690        get_random_bytes(result, ic->tag_size);
1691}
1692
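/*
 * Per-bio tag processing, run from a workqueue. With an internal hash the
 * checksums are computed here and written to the metadata area (writes),
 * compared against it (reads) or overwritten with DISCARD_FILLER
 * (discards). Without an internal hash the tags are copied between the
 * bio integrity payload and the metadata area.
 */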
1693static void integrity_metadata(struct work_struct *w)
1694{
1695        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1696        struct dm_integrity_c *ic = dio->ic;
1697
1698        int r;
1699
1700        if (ic->internal_hash) {
1701                struct bvec_iter iter;
1702                struct bio_vec bv;
1703                unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1704                struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1705                char *checksums;
1706                unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1707                char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1708                sector_t sector;
1709                unsigned sectors_to_process;
1710
1711                if (unlikely(ic->mode == 'R'))
1712                        goto skip_io;
1713
1714                if (likely(dio->op != REQ_OP_DISCARD))
1715                        checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1716                                            GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1717                else
1718                        checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1719                if (!checksums) {
1720                        checksums = checksums_onstack;
1721                        if (WARN_ON(extra_space &&
1722                                    digest_size > sizeof(checksums_onstack))) {
1723                                r = -EINVAL;
1724                                goto error;
1725                        }
1726                }
1727
1728                if (unlikely(dio->op == REQ_OP_DISCARD)) {
1729                        sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
1730                        unsigned bi_size = dio->bio_details.bi_iter.bi_size;
1731                        unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1732                        unsigned max_blocks = max_size / ic->tag_size;
1733                        memset(checksums, DISCARD_FILLER, max_size);
1734
1735                        while (bi_size) {
1736                                unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1737                                this_step_blocks = min(this_step_blocks, max_blocks);
1738                                r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1739                                                        this_step_blocks * ic->tag_size, TAG_WRITE);
1740                                if (unlikely(r)) {
1741                                        if (likely(checksums != checksums_onstack))
1742                                                kfree(checksums);
1743                                        goto error;
1744                                }
1745
1751                                bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1752                                bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1753                        }
1754
1755                        if (likely(checksums != checksums_onstack))
1756                                kfree(checksums);
1757                        goto skip_io;
1758                }
1759
1760                sector = dio->range.logical_sector;
1761                sectors_to_process = dio->range.n_sectors;
1762
1763                __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1764                        unsigned pos;
1765                        char *mem, *checksums_ptr;
1766
1767again:
1768                        mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
1769                        pos = 0;
1770                        checksums_ptr = checksums;
1771                        do {
1772                                integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1773                                checksums_ptr += ic->tag_size;
1774                                sectors_to_process -= ic->sectors_per_block;
1775                                pos += ic->sectors_per_block << SECTOR_SHIFT;
1776                                sector += ic->sectors_per_block;
1777                        } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1778                        kunmap_atomic(mem);
1779
1780                        r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1781                                                checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1782                        if (unlikely(r)) {
1783                                if (r > 0) {
1784                                        char b[BDEVNAME_SIZE];
1785                                        DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
1786                                                    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1787                                        r = -EILSEQ;
1788                                        atomic64_inc(&ic->number_of_mismatches);
1789                                }
1790                                if (likely(checksums != checksums_onstack))
1791                                        kfree(checksums);
1792                                goto error;
1793                        }
1794
1795                        if (!sectors_to_process)
1796                                break;
1797
1798                        if (unlikely(pos < bv.bv_len)) {
1799                                bv.bv_offset += pos;
1800                                bv.bv_len -= pos;
1801                                goto again;
1802                        }
1803                }
1804
1805                if (likely(checksums != checksums_onstack))
1806                        kfree(checksums);
1807        } else {
1808                struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1809
1810                if (bip) {
1811                        struct bio_vec biv;
1812                        struct bvec_iter iter;
1813                        unsigned data_to_process = dio->range.n_sectors;
1814                        sector_to_block(ic, data_to_process);
1815                        data_to_process *= ic->tag_size;
1816
1817                        bip_for_each_vec(biv, bip, iter) {
1818                                unsigned char *tag;
1819                                unsigned this_len;
1820
1821                                BUG_ON(PageHighMem(biv.bv_page));
1822                                tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1823                                this_len = min(biv.bv_len, data_to_process);
1824                                r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1825                                                        this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1826                                if (unlikely(r))
1827                                        goto error;
1828                                data_to_process -= this_len;
1829                                if (!data_to_process)
1830                                        break;
1831                        }
1832                }
1833        }
1834skip_io:
1835        dec_in_flight(dio);
1836        return;
1837error:
1838        dio->bi_status = errno_to_blk_status(r);
1839        dec_in_flight(dio);
1840}
1841
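/*
 * The target map function: validate the size and alignment of the bio,
 * split discards that would cross a max_io_len boundary, strip REQ_FUA
 * (it is emulated with an explicit flush on completion), look up the tag
 * location for the first sector and hand the bio over to
 * dm_integrity_map_continue().
 */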
1842static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1843{
1844        struct dm_integrity_c *ic = ti->private;
1845        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1846        struct bio_integrity_payload *bip;
1847
1848        sector_t area, offset;
1849
1850        dio->ic = ic;
1851        dio->bi_status = 0;
1852        dio->op = bio_op(bio);
1853
1854        if (unlikely(dio->op == REQ_OP_DISCARD)) {
1855                if (ti->max_io_len) {
1856                        sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1857                        unsigned log2_max_io_len = __fls(ti->max_io_len);
1858                        sector_t start_boundary = sec >> log2_max_io_len;
1859                        sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1860                        if (start_boundary < end_boundary) {
1861                                sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1862                                dm_accept_partial_bio(bio, len);
1863                        }
1864                }
1865        }
1866
1867        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1868                submit_flush_bio(ic, dio);
1869                return DM_MAPIO_SUBMITTED;
1870        }
1871
1872        dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1873        dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1874        if (unlikely(dio->fua)) {
1875                /*
1876                 * Don't pass down the FUA flag because we have to flush
1877                 * disk cache anyway.
1878                 */
1879                bio->bi_opf &= ~REQ_FUA;
1880        }
1881        if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1882                DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1883                      dio->range.logical_sector, bio_sectors(bio),
1884                      ic->provided_data_sectors);
1885                return DM_MAPIO_KILL;
1886        }
1887        if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1888                DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1889                      ic->sectors_per_block,
1890                      dio->range.logical_sector, bio_sectors(bio));
1891                return DM_MAPIO_KILL;
1892        }
1893
1894        if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1895                struct bvec_iter iter;
1896                struct bio_vec bv;
1897                bio_for_each_segment(bv, bio, iter) {
1898                        if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1899                                DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1900                                        bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1901                                return DM_MAPIO_KILL;
1902                        }
1903                }
1904        }
1905
1906        bip = bio_integrity(bio);
1907        if (!ic->internal_hash) {
1908                if (bip) {
1909                        unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1910                        if (ic->log2_tag_size >= 0)
1911                                wanted_tag_size <<= ic->log2_tag_size;
1912                        else
1913                                wanted_tag_size *= ic->tag_size;
1914                        if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1915                                DMERR("Invalid integrity data size %u, expected %u",
1916                                      bip->bip_iter.bi_size, wanted_tag_size);
1917                                return DM_MAPIO_KILL;
1918                        }
1919                }
1920        } else {
1921                if (unlikely(bip != NULL)) {
1922                        DMERR("Unexpected integrity data when using internal hash");
1923                        return DM_MAPIO_KILL;
1924                }
1925        }
1926
1927        if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1928                return DM_MAPIO_KILL;
1929
1930        get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1931        dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1932        bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1933
1934        dm_integrity_map_continue(dio, true);
1935        return DM_MAPIO_SUBMITTED;
1936}
1937
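/*
 * Copy the bio's data directly from/to the in-memory journal, starting at
 * the given journal section and entry. Returns true if the journal
 * allocation was exhausted before the whole bio was processed; the caller
 * then re-acquires the lock and continues with the remainder of the bio.
 */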
1938static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1939                                 unsigned journal_section, unsigned journal_entry)
1940{
1941        struct dm_integrity_c *ic = dio->ic;
1942        sector_t logical_sector;
1943        unsigned n_sectors;
1944
1945        logical_sector = dio->range.logical_sector;
1946        n_sectors = dio->range.n_sectors;
1947        do {
1948                struct bio_vec bv = bio_iovec(bio);
1949                char *mem;
1950
1951                if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1952                        bv.bv_len = n_sectors << SECTOR_SHIFT;
1953                n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1954                bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1955retry_kmap:
1956                mem = kmap_atomic(bv.bv_page);
1957                if (likely(dio->op == REQ_OP_WRITE))
1958                        flush_dcache_page(bv.bv_page);
1959
1960                do {
1961                        struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1962
1963                        if (unlikely(dio->op == REQ_OP_READ)) {
1964                                struct journal_sector *js;
1965                                char *mem_ptr;
1966                                unsigned s;
1967
1968                                if (unlikely(journal_entry_is_inprogress(je))) {
1969                                        flush_dcache_page(bv.bv_page);
1970                                        kunmap_atomic(mem);
1971
1972                                        __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1973                                        goto retry_kmap;
1974                                }
1975                                smp_rmb();
1976                                BUG_ON(journal_entry_get_sector(je) != logical_sector);
1977                                js = access_journal_data(ic, journal_section, journal_entry);
1978                                mem_ptr = mem + bv.bv_offset;
1979                                s = 0;
1980                                do {
1981                                        memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1982                                        *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1983                                        js++;
1984                                        mem_ptr += 1 << SECTOR_SHIFT;
1985                                } while (++s < ic->sectors_per_block);
1986#ifdef INTERNAL_VERIFY
1987                                if (ic->internal_hash) {
1988                                        char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1989
1990                                        integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1991                                        if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1992                                                DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1993                                                            logical_sector);
1994                                        }
1995                                }
1996#endif
1997                        }
1998
1999                        if (!ic->internal_hash) {
2000                                struct bio_integrity_payload *bip = bio_integrity(bio);
2001                                unsigned tag_todo = ic->tag_size;
2002                                char *tag_ptr = journal_entry_tag(ic, je);
2003
2004                                if (!bip) {
2005                                        if (likely(dio->op == REQ_OP_WRITE))
2006                                                memset(tag_ptr, 0, tag_todo);
2007                                } else do {
2008                                        struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
2009                                        unsigned tag_now = min(biv.bv_len, tag_todo);
2010                                        char *tag_addr;
2011                                        BUG_ON(PageHighMem(biv.bv_page));
2012                                        tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
2013                                        if (likely(dio->op == REQ_OP_WRITE))
2014                                                memcpy(tag_ptr, tag_addr, tag_now);
2015                                        else
2016                                                memcpy(tag_addr, tag_ptr, tag_now);
2017                                        bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
2018                                        tag_ptr += tag_now;
2019                                        tag_todo -= tag_now;
2020                                } while (unlikely(tag_todo));
2021                        }
2022
2023                        if (likely(dio->op == REQ_OP_WRITE)) {
2024                                struct journal_sector *js;
2025                                unsigned s;
2026
2027                                js = access_journal_data(ic, journal_section, journal_entry);
2028                                memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
2029
2030                                s = 0;
2031                                do {
2032                                        je->last_bytes[s] = js[s].commit_id;
2033                                } while (++s < ic->sectors_per_block);
2034
2035                                if (ic->internal_hash) {
2036                                        unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
2037                                        if (unlikely(digest_size > ic->tag_size)) {
2038                                                char checksums_onstack[HASH_MAX_DIGESTSIZE];
2039                                                integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
2040                                                memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
2041                                        } else
2042                                                integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
2043                                }
2044
2045                                journal_entry_set_sector(je, logical_sector);
2046                        }
2047                        logical_sector += ic->sectors_per_block;
2048
2049                        journal_entry++;
2050                        if (unlikely(journal_entry == ic->journal_section_entries)) {
2051                                journal_entry = 0;
2052                                journal_section++;
2053                                wraparound_section(ic, &journal_section);
2054                        }
2055
2056                        bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
2057                } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
2058
2059                if (unlikely(dio->op == REQ_OP_READ))
2060                        flush_dcache_page(bv.bv_page);
2061                kunmap_atomic(mem);
2062        } while (n_sectors);
2063
2064        if (likely(dio->op == REQ_OP_WRITE)) {
2065                smp_mb();
2066                if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
2067                        wake_up(&ic->copy_to_journal_wait);
2068                if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
2069                        queue_work(ic->commit_wq, &ic->commit_work);
2070                } else {
2071                        schedule_autocommit(ic);
2072                }
2073        } else {
2074                remove_range(ic, &dio->range);
2075        }
2076
2077        if (unlikely(bio->bi_iter.bi_size)) {
2078                sector_t area, offset;
2079
2080                dio->range.logical_sector = logical_sector;
2081                get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2082                dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2083                return true;
2084        }
2085
2086        return false;
2087}
2088
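/*
 * The main I/O path. In journal mode, writes are copied into the journal
 * and reads are serviced from the journal when the sector is found there.
 * Otherwise the range is locked against the journal writer, the bio is
 * remapped to the data device, and the tag work is either done
 * synchronously (reads with an internal hash) or offloaded to a workqueue.
 */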
2089static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
2090{
2091        struct dm_integrity_c *ic = dio->ic;
2092        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2093        unsigned journal_section, journal_entry;
2094        unsigned journal_read_pos;
2095        struct completion read_comp;
2096        bool discard_retried = false;
2097        bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
2098        if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2099                need_sync_io = true;
2100
2101        if (need_sync_io && from_map) {
2102                INIT_WORK(&dio->work, integrity_bio_wait);
2103                queue_work(ic->offload_wq, &dio->work);
2104                return;
2105        }
2106
2107lock_retry:
2108        spin_lock_irq(&ic->endio_wait.lock);
2109retry:
2110        if (unlikely(dm_integrity_failed(ic))) {
2111                spin_unlock_irq(&ic->endio_wait.lock);
2112                do_endio(ic, bio);
2113                return;
2114        }
2115        dio->range.n_sectors = bio_sectors(bio);
2116        journal_read_pos = NOT_FOUND;
2117        if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2118                if (dio->op == REQ_OP_WRITE) {
2119                        unsigned next_entry, i, pos;
2120                        unsigned ws, we, range_sectors;
2121
2122                        dio->range.n_sectors = min(dio->range.n_sectors,
2123                                                   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2124                        if (unlikely(!dio->range.n_sectors)) {
2125                                if (from_map)
2126                                        goto offload_to_thread;
2127                                sleep_on_endio_wait(ic);
2128                                goto retry;
2129                        }
2130                        range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2131                        ic->free_sectors -= range_sectors;
2132                        journal_section = ic->free_section;
2133                        journal_entry = ic->free_section_entry;
2134
2135                        next_entry = ic->free_section_entry + range_sectors;
2136                        ic->free_section_entry = next_entry % ic->journal_section_entries;
2137                        ic->free_section += next_entry / ic->journal_section_entries;
2138                        ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2139                        wraparound_section(ic, &ic->free_section);
2140
2141                        pos = journal_section * ic->journal_section_entries + journal_entry;
2142                        ws = journal_section;
2143                        we = journal_entry;
2144                        i = 0;
2145                        do {
2146                                struct journal_entry *je;
2147
2148                                add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2149                                pos++;
2150                                if (unlikely(pos >= ic->journal_entries))
2151                                        pos = 0;
2152
2153                                je = access_journal_entry(ic, ws, we);
2154                                BUG_ON(!journal_entry_is_unused(je));
2155                                journal_entry_set_inprogress(je);
2156                                we++;
2157                                if (unlikely(we == ic->journal_section_entries)) {
2158                                        we = 0;
2159                                        ws++;
2160                                        wraparound_section(ic, &ws);
2161                                }
2162                        } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2163
2164                        spin_unlock_irq(&ic->endio_wait.lock);
2165                        goto journal_read_write;
2166                } else {
2167                        sector_t next_sector;
2168                        journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2169                        if (likely(journal_read_pos == NOT_FOUND)) {
2170                                if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2171                                        dio->range.n_sectors = next_sector - dio->range.logical_sector;
2172                        } else {
2173                                unsigned i;
2174                                unsigned jp = journal_read_pos + 1;
2175                                for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2176                                        if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2177                                                break;
2178                                }
2179                                dio->range.n_sectors = i;
2180                        }
2181                }
2182        }
2183        if (unlikely(!add_new_range(ic, &dio->range, true))) {
2184                /*
2185                 * We must not sleep in the request routine because it could
2186                 * stall bios on current->bio_list.
2187                 * So, we offload the bio to a workqueue if we have to sleep.
2188                 */
2189                if (from_map) {
2190offload_to_thread:
2191                        spin_unlock_irq(&ic->endio_wait.lock);
2192                        INIT_WORK(&dio->work, integrity_bio_wait);
2193                        queue_work(ic->wait_wq, &dio->work);
2194                        return;
2195                }
2196                if (journal_read_pos != NOT_FOUND)
2197                        dio->range.n_sectors = ic->sectors_per_block;
2198                wait_and_add_new_range(ic, &dio->range);
2199                /*
2200                 * wait_and_add_new_range drops the spinlock, so the journal
2201                 * may have been changed arbitrarily. We need to recheck.
2202                 * To simplify the code, we restrict I/O size to just one block.
2203                 */
2204                if (journal_read_pos != NOT_FOUND) {
2205                        sector_t next_sector;
2206                        unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2207                        if (unlikely(new_pos != journal_read_pos)) {
2208                                remove_range_unlocked(ic, &dio->range);
2209                                goto retry;
2210                        }
2211                }
2212        }
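        /*
         * A discard in journal mode must not overlap sectors that still
         * have entries in the journal: if an overlap is detected, force a
         * journal commit and write-back and retry once.
         */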
2213        if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2214                sector_t next_sector;
2215                unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2216                if (unlikely(new_pos != NOT_FOUND) ||
2217                    unlikely(next_sector < dio->range.logical_sector + dio->range.n_sectors)) {
2218                        remove_range_unlocked(ic, &dio->range);
2219                        spin_unlock_irq(&ic->endio_wait.lock);
2220                        queue_work(ic->commit_wq, &ic->commit_work);
2221                        flush_workqueue(ic->commit_wq);
2222                        queue_work(ic->writer_wq, &ic->writer_work);
2223                        flush_workqueue(ic->writer_wq);
2224                        discard_retried = true;
2225                        goto lock_retry;
2226                }
2227        }
2228        spin_unlock_irq(&ic->endio_wait.lock);
2229
2230        if (unlikely(journal_read_pos != NOT_FOUND)) {
2231                journal_section = journal_read_pos / ic->journal_section_entries;
2232                journal_entry = journal_read_pos % ic->journal_section_entries;
2233                goto journal_read_write;
2234        }
2235
2236        if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2237                if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2238                                     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2239                        struct bitmap_block_status *bbs;
2240
2241                        bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2242                        spin_lock(&bbs->bio_queue_lock);
2243                        bio_list_add(&bbs->bio_queue, bio);
2244                        spin_unlock(&bbs->bio_queue_lock);
2245                        queue_work(ic->writer_wq, &bbs->work);
2246                        return;
2247                }
2248        }
2249
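        /*
         * Two references: one is dropped when the remapped bio completes,
         * the other when integrity_metadata() finishes the tag work.
         */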
2250        dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2251
2252        if (need_sync_io) {
2253                init_completion(&read_comp);
2254                dio->completion = &read_comp;
2255        } else
2256                dio->completion = NULL;
2257
2258        dm_bio_record(&dio->bio_details, bio);
2259        bio_set_dev(bio, ic->dev->bdev);
2260        bio->bi_integrity = NULL;
2261        bio->bi_opf &= ~REQ_INTEGRITY;
2262        bio->bi_end_io = integrity_end_io;
2263        bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2264
2265        if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2266                integrity_metadata(&dio->work);
2267                dm_integrity_flush_buffers(ic, false);
2268
2269                dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2270                dio->completion = NULL;
2271
2272                submit_bio_noacct(bio);
2273
2274                return;
2275        }
2276
2277        submit_bio_noacct(bio);
2278
2279        if (need_sync_io) {
2280                wait_for_completion_io(&read_comp);
2281                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2282                    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2283                        goto skip_check;
2284                if (ic->mode == 'B') {
2285                        if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2286                                             dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2287                                goto skip_check;
2288                }
2289
2290                if (likely(!bio->bi_status))
2291                        integrity_metadata(&dio->work);
2292                else
2293skip_check:
2294                        dec_in_flight(dio);
2295
2296        } else {
2297                INIT_WORK(&dio->work, integrity_metadata);
2298                queue_work(ic->metadata_wq, &dio->work);
2299        }
2300
2301        return;
2302
2303journal_read_write:
2304        if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2305                goto lock_retry;
2306
2307        do_endio_flush(ic, dio);
2308}
2309
2310
2311static void integrity_bio_wait(struct work_struct *w)
2312{
2313        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2314
2315        dm_integrity_map_continue(dio, false);
2316}
2317
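/*
 * If the current journal section is only partially filled, skip its unused
 * entries so that commit always operates on whole sections. The WARN_ON
 * checks the invariant that every journal entry is accounted for as either
 * free or part of an uncommitted or committed section.
 */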
2318static void pad_uncommitted(struct dm_integrity_c *ic)
2319{
2320        if (ic->free_section_entry) {
2321                ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2322                ic->free_section_entry = 0;
2323                ic->free_section++;
2324                wraparound_section(ic, &ic->free_section);
2325                ic->n_uncommitted_sections++;
2326        }
2327        if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2328                    (ic->n_uncommitted_sections + ic->n_committed_sections) *
2329                    ic->journal_section_entries + ic->free_sectors)) {
2330                DMCRIT("journal_sections %u, journal_section_entries %u, "
2331                       "n_uncommitted_sections %u, n_committed_sections %u, "
2332                       "journal_section_entries %u, free_sectors %u",
2333                       ic->journal_sections, ic->journal_section_entries,
2334                       ic->n_uncommitted_sections, ic->n_committed_sections,
2335                       ic->journal_section_entries, ic->free_sectors);
2336        }
2337}
2338
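/*
 * The commit work: wait for in-progress copies into the uncommitted
 * sections to finish, stamp every journal sector with the current commit
 * id, write the sections to the journal area and finally complete any
 * queued flush bios.
 */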
2339static void integrity_commit(struct work_struct *w)
2340{
2341        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2342        unsigned commit_start, commit_sections;
2343        unsigned i, j, n;
2344        struct bio *flushes;
2345
2346        del_timer(&ic->autocommit_timer);
2347
2348        spin_lock_irq(&ic->endio_wait.lock);
2349        flushes = bio_list_get(&ic->flush_bio_list);
2350        if (unlikely(ic->mode != 'J')) {
2351                spin_unlock_irq(&ic->endio_wait.lock);
2352                dm_integrity_flush_buffers(ic, true);
2353                goto release_flush_bios;
2354        }
2355
2356        pad_uncommitted(ic);
2357        commit_start = ic->uncommitted_section;
2358        commit_sections = ic->n_uncommitted_sections;
2359        spin_unlock_irq(&ic->endio_wait.lock);
2360
2361        if (!commit_sections)
2362                goto release_flush_bios;
2363
2364        i = commit_start;
2365        for (n = 0; n < commit_sections; n++) {
2366                for (j = 0; j < ic->journal_section_entries; j++) {
2367                        struct journal_entry *je;
2368                        je = access_journal_entry(ic, i, j);
2369                        io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2370                }
2371                for (j = 0; j < ic->journal_section_sectors; j++) {
2372                        struct journal_sector *js;
2373                        js = access_journal(ic, i, j);
2374                        js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2375                }
2376                i++;
2377                if (unlikely(i >= ic->journal_sections))
2378                        ic->commit_seq = next_commit_seq(ic->commit_seq);
2379                wraparound_section(ic, &i);
2380        }
2381        smp_rmb();
2382
2383        write_journal(ic, commit_start, commit_sections);
2384
2385        spin_lock_irq(&ic->endio_wait.lock);
2386        ic->uncommitted_section += commit_sections;
2387        wraparound_section(ic, &ic->uncommitted_section);
2388        ic->n_uncommitted_sections -= commit_sections;
2389        ic->n_committed_sections += commit_sections;
2390        spin_unlock_irq(&ic->endio_wait.lock);
2391
2392        if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2393                queue_work(ic->writer_wq, &ic->writer_work);
2394
2395release_flush_bios:
2396        while (flushes) {
2397                struct bio *next = flushes->bi_next;
2398                flushes->bi_next = NULL;
2399                do_endio(ic, flushes);
2400                flushes = next;
2401        }
2402}
2403
2404static void complete_copy_from_journal(unsigned long error, void *context)
2405{
2406        struct journal_io *io = context;
2407        struct journal_completion *comp = io->comp;
2408        struct dm_integrity_c *ic = comp->ic;
2409        remove_range(ic, &io->range);
2410        mempool_free(io, &ic->journal_io_mempool);
2411        if (unlikely(error != 0))
2412                dm_integrity_io_error(ic, "copying from journal", -EIO);
2413        complete_journal_op(comp);
2414}
2415
2416static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2417                               struct journal_entry *je)
2418{
2419        unsigned s = 0;
2420        do {
2421                js->commit_id = je->last_bytes[s];
2422                js++;
2423        } while (++s < ic->sectors_per_block);
2424}
2425
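/*
 * Write committed journal sections back to their final location on the
 * data device. Runs of adjacent entries are coalesced into a single copy,
 * entries superseded by newer committed ones are dropped, and the tags
 * stored in the journal are written to the metadata area before the data
 * is copied.
 */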
2426static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2427                             unsigned write_sections, bool from_replay)
2428{
2429        unsigned i, j, n;
2430        struct journal_completion comp;
2431        struct blk_plug plug;
2432
2433        blk_start_plug(&plug);
2434
2435        comp.ic = ic;
2436        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2437        init_completion(&comp.comp);
2438
2439        i = write_start;
2440        for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2441#ifndef INTERNAL_VERIFY
2442                if (unlikely(from_replay))
2443#endif
2444                        rw_section_mac(ic, i, false);
2445                for (j = 0; j < ic->journal_section_entries; j++) {
2446                        struct journal_entry *je = access_journal_entry(ic, i, j);
2447                        sector_t sec, area, offset;
2448                        unsigned k, l, next_loop;
2449                        sector_t metadata_block;
2450                        unsigned metadata_offset;
2451                        struct journal_io *io;
2452
2453                        if (journal_entry_is_unused(je))
2454                                continue;
2455                        BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2456                        sec = journal_entry_get_sector(je);
2457                        if (unlikely(from_replay)) {
2458                                if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2459                                        dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2460                                        sec &= ~(sector_t)(ic->sectors_per_block - 1);
2461                                }
2462                        }
2463                        if (unlikely(sec >= ic->provided_data_sectors))
2464                                continue;
2465                        get_area_and_offset(ic, sec, &area, &offset);
2466                        restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2467                        for (k = j + 1; k < ic->journal_section_entries; k++) {
2468                                struct journal_entry *je2 = access_journal_entry(ic, i, k);
2469                                sector_t sec2, area2, offset2;
2470                                if (journal_entry_is_unused(je2))
2471                                        break;
2472                                BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2473                                sec2 = journal_entry_get_sector(je2);
2474                                if (unlikely(sec2 >= ic->provided_data_sectors))
2475                                        break;
2476                                get_area_and_offset(ic, sec2, &area2, &offset2);
2477                                if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2478                                        break;
2479                                restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2480                        }
2481                        next_loop = k - 1;
2482
2483                        io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2484                        io->comp = &comp;
2485                        io->range.logical_sector = sec;
2486                        io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2487
2488                        spin_lock_irq(&ic->endio_wait.lock);
2489                        add_new_range_and_wait(ic, &io->range);
2490
2491                        if (likely(!from_replay)) {
2492                                struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2493
2494                                /* don't write if there is a newer committed version of this sector */
2495                                while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2496                                        struct journal_entry *je2 = access_journal_entry(ic, i, j);
2497
2498                                        journal_entry_set_unused(je2);
2499                                        remove_journal_node(ic, &section_node[j]);
2500                                        j++;
2501                                        sec += ic->sectors_per_block;
2502                                        offset += ic->sectors_per_block;
2503                                }
2504                                while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2505                                        struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2506
2507                                        journal_entry_set_unused(je2);
2508                                        remove_journal_node(ic, &section_node[k - 1]);
2509                                        k--;
2510                                }
2511                                if (j == k) {
2512                                        remove_range_unlocked(ic, &io->range);
2513                                        spin_unlock_irq(&ic->endio_wait.lock);
2514                                        mempool_free(io, &ic->journal_io_mempool);
2515                                        goto skip_io;
2516                                }
2517                                for (l = j; l < k; l++) {
2518                                        remove_journal_node(ic, &section_node[l]);
2519                                }
2520                        }
2521                        spin_unlock_irq(&ic->endio_wait.lock);
2522
2523                        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2524                        for (l = j; l < k; l++) {
2525                                int r;
2526                                struct journal_entry *je2 = access_journal_entry(ic, i, l);
2527
2528                                if (
2529#ifndef INTERNAL_VERIFY
2530                                    unlikely(from_replay) &&
2531#endif
2532                                    ic->internal_hash) {
2533                                        char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2534
2535                                        integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2536                                                                  (char *)access_journal_data(ic, i, l), test_tag);
2537                                        if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2538                                                dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2539                                }
2540
2541                                journal_entry_set_unused(je2);
2542                                r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2543                                                        ic->tag_size, TAG_WRITE);
2544                                if (unlikely(r)) {
2545                                        dm_integrity_io_error(ic, "reading tags", r);
2546                                }
2547                        }
2548
2549                        atomic_inc(&comp.in_flight);
2550                        copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2551                                          (k - j) << ic->sb->log2_sectors_per_block,
2552                                          get_data_sector(ic, area, offset),
2553                                          complete_copy_from_journal, io);
2554skip_io:
2555                        j = next_loop;
2556                }
2557        }
2558
2559        dm_bufio_write_dirty_buffers_async(ic->bufio);
2560
2561        blk_finish_plug(&plug);
2562
2563        complete_journal_op(&comp);
2564        wait_for_completion_io(&comp.comp);
2565
2566        dm_integrity_flush_buffers(ic, true);
2567}
2568
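/*
 * The journal write-back work: write the committed sections back to the
 * device and mark their entries free again, waking up any writer that was
 * blocked on a full journal.
 */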
2569static void integrity_writer(struct work_struct *w)
2570{
2571        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2572        unsigned write_start, write_sections;
2573
2574        unsigned prev_free_sectors;
2575
2576        /* the following test is not needed for correctness, but it exercises the journal replay code */
2577        if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
2578                return;
2579
2580        spin_lock_irq(&ic->endio_wait.lock);
2581        write_start = ic->committed_section;
2582        write_sections = ic->n_committed_sections;
2583        spin_unlock_irq(&ic->endio_wait.lock);
2584
2585        if (!write_sections)
2586                return;
2587
2588        do_journal_write(ic, write_start, write_sections, false);
2589
2590        spin_lock_irq(&ic->endio_wait.lock);
2591
2592        ic->committed_section += write_sections;
2593        wraparound_section(ic, &ic->committed_section);
2594        ic->n_committed_sections -= write_sections;
2595
2596        prev_free_sectors = ic->free_sectors;
2597        ic->free_sectors += write_sections * ic->journal_section_entries;
2598        if (unlikely(!prev_free_sectors))
2599                wake_up_locked(&ic->endio_wait);
2600
2601        spin_unlock_irq(&ic->endio_wait.lock);
2602}
2603
2604static void recalc_write_super(struct dm_integrity_c *ic)
2605{
2606        int r;
2607
2608        dm_integrity_flush_buffers(ic, false);
2609        if (dm_integrity_failed(ic))
2610                return;
2611
2612        r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2613        if (unlikely(r))
2614                dm_integrity_io_error(ic, "writing superblock", r);
2615}
2616
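/*
 * Background tag recalculation: walk the device in chunks of up to
 * RECALC_SECTORS, read the data, compute and store the checksums, and
 * periodically write the position to the superblock so that an
 * interrupted recalculation can be resumed.
 */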
2617static void integrity_recalc(struct work_struct *w)
2618{
2619        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2620        struct dm_integrity_range range;
2621        struct dm_io_request io_req;
2622        struct dm_io_region io_loc;
2623        sector_t area, offset;
2624        sector_t metadata_block;
2625        unsigned metadata_offset;
2626        sector_t logical_sector, n_sectors;
2627        __u8 *t;
2628        unsigned i;
2629        int r;
2630        unsigned super_counter = 0;
2631
2632        DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2633
2634        spin_lock_irq(&ic->endio_wait.lock);
2635
2636next_chunk:
2637
2638        if (unlikely(dm_post_suspending(ic->ti)))
2639                goto unlock_ret;
2640
2641        range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2642        if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2643                if (ic->mode == 'B') {
2644                        block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2645                        DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2646                        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2647                }
2648                goto unlock_ret;
2649        }
2650
2651        get_area_and_offset(ic, range.logical_sector, &area, &offset);
2652        range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2653        if (!ic->meta_dev)
2654                range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2655
2656        add_new_range_and_wait(ic, &range);
2657        spin_unlock_irq(&ic->endio_wait.lock);
2658        logical_sector = range.logical_sector;
2659        n_sectors = range.n_sectors;
2660
2661        if (ic->mode == 'B') {
2662                if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2663                        goto advance_and_next;
2664                }
2665                while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2666                                       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2667                        logical_sector += ic->sectors_per_block;
2668                        n_sectors -= ic->sectors_per_block;
2669                        cond_resched();
2670                }
2671                while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2672                                       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2673                        n_sectors -= ic->sectors_per_block;
2674                        cond_resched();
2675                }
2676                get_area_and_offset(ic, logical_sector, &area, &offset);
2677        }
2678
2679        DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2680
2681        if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2682                recalc_write_super(ic);
2683                if (ic->mode == 'B') {
2684                        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2685                }
2686                super_counter = 0;
2687        }
2688
2689        if (unlikely(dm_integrity_failed(ic)))
2690                goto err;
2691
2692        io_req.bi_op = REQ_OP_READ;
2693        io_req.bi_op_flags = 0;
2694        io_req.mem.type = DM_IO_VMA;
2695        io_req.mem.ptr.addr = ic->recalc_buffer;
2696        io_req.notify.fn = NULL;
2697        io_req.client = ic->io;
2698        io_loc.bdev = ic->dev->bdev;
2699        io_loc.sector = get_data_sector(ic, area, offset);
2700        io_loc.count = n_sectors;
2701
2702        r = dm_io(&io_req, 1, &io_loc, NULL);
2703        if (unlikely(r)) {
2704                dm_integrity_io_error(ic, "reading data", r);
2705                goto err;
2706        }
2707
2708        t = ic->recalc_tags;
2709        for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2710                integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2711                t += ic->tag_size;
2712        }
2713
2714        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2715
2716        r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2717        if (unlikely(r)) {
2718                dm_integrity_io_error(ic, "writing tags", r);
2719                goto err;
2720        }
2721
2722        if (ic->mode == 'B') {
2723                sector_t start, end;
2724                start = (range.logical_sector >>
2725                         (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2726                        (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2727                end = ((range.logical_sector + range.n_sectors) >>
2728                       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2729                        (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2730                block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2731        }
2732
2733advance_and_next:
2734        cond_resched();
2735
2736        spin_lock_irq(&ic->endio_wait.lock);
2737        remove_range_unlocked(ic, &range);
2738        ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2739        goto next_chunk;
2740
2741err:
2742        remove_range(ic, &range);
2743        return;
2744
2745unlock_ret:
2746        spin_unlock_irq(&ic->endio_wait.lock);
2747
2748        recalc_write_super(ic);
2749}
2750
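/*
 * bitmap_block_work handles bios queued against one on-disk bitmap block
 * in bitmap ('B') mode. A bio whose range is already covered by set bits
 * in may_write_bitmap can proceed immediately. Otherwise the bits are set
 * in the in-memory journal bitmap first, that bitmap block is written out
 * with REQ_FUA, and only then is may_write_bitmap updated and the bio
 * released, guaranteeing the dirty bits reach stable storage before the
 * data they cover can be written.
 */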
2751static void bitmap_block_work(struct work_struct *w)
2752{
2753        struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2754        struct dm_integrity_c *ic = bbs->ic;
2755        struct bio *bio;
2756        struct bio_list bio_queue;
2757        struct bio_list waiting;
2758
2759        bio_list_init(&waiting);
2760
2761        spin_lock(&bbs->bio_queue_lock);
2762        bio_queue = bbs->bio_queue;
2763        bio_list_init(&bbs->bio_queue);
2764        spin_unlock(&bbs->bio_queue_lock);
2765
2766        while ((bio = bio_list_pop(&bio_queue))) {
2767                struct dm_integrity_io *dio;
2768
2769                dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2770
2771                if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2772                                    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2773                        remove_range(ic, &dio->range);
2774                        INIT_WORK(&dio->work, integrity_bio_wait);
2775                        queue_work(ic->offload_wq, &dio->work);
2776                } else {
2777                        block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2778                                        dio->range.n_sectors, BITMAP_OP_SET);
2779                        bio_list_add(&waiting, bio);
2780                }
2781        }
2782
2783        if (bio_list_empty(&waiting))
2784                return;
2785
2786        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2787                           bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2788                           BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2789
2790        while ((bio = bio_list_pop(&waiting))) {
2791                struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2792
2793                block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2794                                dio->range.n_sectors, BITMAP_OP_SET);
2795
2796                remove_range(ic, &dio->range);
2797                INIT_WORK(&dio->work, integrity_bio_wait);
2798                queue_work(ic->offload_wq, &dio->work);
2799        }
2800
2801        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2802}
2803
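/*
 * bitmap_flush_work periodically cleans the bitmap in 'B' mode: it locks
 * the whole device as one range, flushes outstanding buffers, clears the
 * journal and may_write bitmaps up to the recalculation limit and writes
 * the bitmap blocks back with REQ_FUA. Bios parked on synchronous_bios
 * (synchronous mode) are completed once the flush is done.
 */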
2804static void bitmap_flush_work(struct work_struct *work)
2805{
2806        struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2807        struct dm_integrity_range range;
2808        unsigned long limit;
2809        struct bio *bio;
2810
2811        dm_integrity_flush_buffers(ic, false);
2812
2813        range.logical_sector = 0;
2814        range.n_sectors = ic->provided_data_sectors;
2815
2816        spin_lock_irq(&ic->endio_wait.lock);
2817        add_new_range_and_wait(ic, &range);
2818        spin_unlock_irq(&ic->endio_wait.lock);
2819
2820        dm_integrity_flush_buffers(ic, true);
2821
2822        limit = ic->provided_data_sectors;
2823        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2824                limit = le64_to_cpu(ic->sb->recalc_sector)
2825                        >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2826                        << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2827        }
2828        /*DEBUG_print("zeroing journal\n");*/
2829        block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2830        block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2831
2832        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2833                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2834
2835        spin_lock_irq(&ic->endio_wait.lock);
2836        remove_range_unlocked(ic, &range);
2837        while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2838                bio_endio(bio);
2839                spin_unlock_irq(&ic->endio_wait.lock);
2840                spin_lock_irq(&ic->endio_wait.lock);
2841        }
2842        spin_unlock_irq(&ic->endio_wait.lock);
2843}
2844
2845
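/*
 * init_journal formats n_sections journal sections starting at
 * start_section: every journal sector gets zeroed entry space and the
 * commit id matching commit_seq, every journal entry is marked unused,
 * and the result is written to disk.
 */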
2846static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2847                         unsigned n_sections, unsigned char commit_seq)
2848{
2849        unsigned i, j, n;
2850
2851        if (!n_sections)
2852                return;
2853
2854        for (n = 0; n < n_sections; n++) {
2855                i = start_section + n;
2856                wraparound_section(ic, &i);
2857                for (j = 0; j < ic->journal_section_sectors; j++) {
2858                        struct journal_sector *js = access_journal(ic, i, j);
2859                        memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2860                        js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2861                }
2862                for (j = 0; j < ic->journal_section_entries; j++) {
2863                        struct journal_entry *je = access_journal_entry(ic, i, j);
2864                        journal_entry_set_unused(je);
2865                }
2866        }
2867
2868        write_journal(ic, start_section, n_sections);
2869}
2870
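/*
 * Every journal sector carries a commit id derived from its (section,
 * sector) position and one of N_COMMIT_IDS (4) rotating commit sequence
 * numbers. find_commit_seq maps a stored id back to its sequence number,
 * or reports an integrity error if it matches none of them.
 */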
2871static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2872{
2873        unsigned char k;
2874        for (k = 0; k < N_COMMIT_IDS; k++) {
2875                if (dm_integrity_commit_id(ic, i, j, k) == id)
2876                        return k;
2877        }
2878        dm_integrity_io_error(ic, "journal commit id", -EIO);
2879        return -EIO;
2880}
2881
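/*
 * replay_journal reconstructs journal state on activation. It reads (and,
 * if journal encryption is configured, decrypts) all sections, determines
 * which commit sequence numbers are in use and where each one ends, and
 * derives from that the window of sections that were committed but
 * possibly not written back; if the journal is not empty, that window is
 * replayed via do_journal_write(). Inconsistent commit ids cause the
 * journal to be reinitialized instead.
 */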
2882static void replay_journal(struct dm_integrity_c *ic)
2883{
2884        unsigned i, j;
2885        bool used_commit_ids[N_COMMIT_IDS];
2886        unsigned max_commit_id_sections[N_COMMIT_IDS];
2887        unsigned write_start, write_sections;
2888        unsigned continue_section;
2889        bool journal_empty;
2890        unsigned char unused, last_used, want_commit_seq;
2891
2892        if (ic->mode == 'R')
2893                return;
2894
2895        if (ic->journal_uptodate)
2896                return;
2897
2898        last_used = 0;
2899        write_start = 0;
2900
2901        if (!ic->just_formatted) {
2902                DEBUG_print("reading journal\n");
2903                rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2904                if (ic->journal_io)
2905                        DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2906                if (ic->journal_io) {
2907                        struct journal_completion crypt_comp;
2908                        crypt_comp.ic = ic;
2909                        init_completion(&crypt_comp.comp);
2910                        crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2911                        encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2912                        wait_for_completion(&crypt_comp.comp);
2913                }
2914                DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2915        }
2916
2917        if (dm_integrity_failed(ic))
2918                goto clear_journal;
2919
2920        journal_empty = true;
2921        memset(used_commit_ids, 0, sizeof used_commit_ids);
2922        memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2923        for (i = 0; i < ic->journal_sections; i++) {
2924                for (j = 0; j < ic->journal_section_sectors; j++) {
2925                        int k;
2926                        struct journal_sector *js = access_journal(ic, i, j);
2927                        k = find_commit_seq(ic, i, j, js->commit_id);
2928                        if (k < 0)
2929                                goto clear_journal;
2930                        used_commit_ids[k] = true;
2931                        max_commit_id_sections[k] = i;
2932                }
2933                if (journal_empty) {
2934                        for (j = 0; j < ic->journal_section_entries; j++) {
2935                                struct journal_entry *je = access_journal_entry(ic, i, j);
2936                                if (!journal_entry_is_unused(je)) {
2937                                        journal_empty = false;
2938                                        break;
2939                                }
2940                        }
2941                }
2942        }
2943
2944        if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2945                unused = N_COMMIT_IDS - 1;
2946                while (unused && !used_commit_ids[unused - 1])
2947                        unused--;
2948        } else {
2949                for (unused = 0; unused < N_COMMIT_IDS; unused++)
2950                        if (!used_commit_ids[unused])
2951                                break;
2952                if (unused == N_COMMIT_IDS) {
2953                        dm_integrity_io_error(ic, "journal commit ids", -EIO);
2954                        goto clear_journal;
2955                }
2956        }
2957        DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2958                    unused, used_commit_ids[0], used_commit_ids[1],
2959                    used_commit_ids[2], used_commit_ids[3]);
2960
2961        last_used = prev_commit_seq(unused);
2962        want_commit_seq = prev_commit_seq(last_used);
2963
2964        if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2965                journal_empty = true;
2966
2967        write_start = max_commit_id_sections[last_used] + 1;
2968        if (unlikely(write_start >= ic->journal_sections))
2969                want_commit_seq = next_commit_seq(want_commit_seq);
2970        wraparound_section(ic, &write_start);
2971
2972        i = write_start;
2973        for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2974                for (j = 0; j < ic->journal_section_sectors; j++) {
2975                        struct journal_sector *js = access_journal(ic, i, j);
2976
2977                        if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2978                                /*
2979                                 * This could be caused by a crash during writing.
2980                                 * We won't replay the inconsistent part of the
2981                                 * journal.
2982                                 */
2983                                DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2984                                            i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2985                                goto brk;
2986                        }
2987                }
2988                i++;
2989                if (unlikely(i >= ic->journal_sections))
2990                        want_commit_seq = next_commit_seq(want_commit_seq);
2991                wraparound_section(ic, &i);
2992        }
2993brk:
2994
2995        if (!journal_empty) {
2996                DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2997                            write_sections, write_start, want_commit_seq);
2998                do_journal_write(ic, write_start, write_sections, true);
2999        }
3000
3001        if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
3002                continue_section = write_start;
3003                ic->commit_seq = want_commit_seq;
3004                DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
3005        } else {
3006                unsigned s;
3007                unsigned char erase_seq;
3008clear_journal:
3009                DEBUG_print("clearing journal\n");
3010
3011                erase_seq = prev_commit_seq(prev_commit_seq(last_used));
3012                s = write_start;
3013                init_journal(ic, s, 1, erase_seq);
3014                s++;
3015                wraparound_section(ic, &s);
3016                if (ic->journal_sections >= 2) {
3017                        init_journal(ic, s, ic->journal_sections - 2, erase_seq);
3018                        s += ic->journal_sections - 2;
3019                        wraparound_section(ic, &s);
3020                        init_journal(ic, s, 1, erase_seq);
3021                }
3022
3023                continue_section = 0;
3024                ic->commit_seq = next_commit_seq(erase_seq);
3025        }
3026
3027        ic->committed_section = continue_section;
3028        ic->n_committed_sections = 0;
3029
3030        ic->uncommitted_section = continue_section;
3031        ic->n_uncommitted_sections = 0;
3032
3033        ic->free_section = continue_section;
3034        ic->free_section_entry = 0;
3035        ic->free_sectors = ic->journal_entries;
3036
3037        ic->journal_tree_root = RB_ROOT;
3038        for (i = 0; i < ic->journal_entries; i++)
3039                init_journal_node(&ic->journal_tree[i]);
3040}
3041
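/*
 * Synchronous mode is entered on reboot notification. It only matters in
 * bitmap mode: the flush interval is dropped to roughly 10 ms and
 * synchronous_mode is set, so the bitmap is written back almost
 * immediately and an imminent reboot loses as little as possible.
 */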
3042static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
3043{
3044        DEBUG_print("dm_integrity_enter_synchronous_mode\n");
3045
3046        if (ic->mode == 'B') {
3047                ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
3048                ic->synchronous_mode = 1;
3049
3050                cancel_delayed_work_sync(&ic->bitmap_flush_work);
3051                queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
3052                flush_workqueue(ic->commit_wq);
3053        }
3054}
3055
3056static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
3057{
3058        struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
3059
3060        DEBUG_print("dm_integrity_reboot\n");
3061
3062        dm_integrity_enter_synchronous_mode(ic);
3063
3064        return NOTIFY_DONE;
3065}
3066
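/*
 * Postsuspend quiesces the target: the autocommit timer is stopped and the
 * recalculation, commit and writer workqueues are drained. In 'B' mode the
 * journal area is reinitialized and SB_FLAG_DIRTY_BITMAP is cleared, so a
 * clean shutdown does not trigger bitmap recovery on the next resume.
 */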
3067static void dm_integrity_postsuspend(struct dm_target *ti)
3068{
3069        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3070        int r;
3071
3072        WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
3073
3074        del_timer_sync(&ic->autocommit_timer);
3075
3076        if (ic->recalc_wq)
3077                drain_workqueue(ic->recalc_wq);
3078
3079        if (ic->mode == 'B')
3080                cancel_delayed_work_sync(&ic->bitmap_flush_work);
3081
3082        queue_work(ic->commit_wq, &ic->commit_work);
3083        drain_workqueue(ic->commit_wq);
3084
3085        if (ic->mode == 'J') {
3086                if (ic->meta_dev)
3087                        queue_work(ic->writer_wq, &ic->writer_work);
3088                drain_workqueue(ic->writer_wq);
3089                dm_integrity_flush_buffers(ic, true);
3090        }
3091
3092        if (ic->mode == 'B') {
3093                dm_integrity_flush_buffers(ic, true);
3094#if 1
3095                /* set to 0 to test bitmap replay code */
3096                init_journal(ic, 0, ic->journal_sections, 0);
3097                ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3098                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3099                if (unlikely(r))
3100                        dm_integrity_io_error(ic, "writing superblock", r);
3101#endif
3102        }
3103
3104        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3105
3106        ic->journal_uptodate = true;
3107}
3108
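/*
 * Resume is where crash recovery happens. Depending on superblock state,
 * it extends the bitmap after the device has grown, recovers from a dirty
 * bitmap (marking the affected regions for recalculation), replays the
 * journal, and re-queues the recalculation work if SB_FLAG_RECALCULATING
 * is still set. Finally the reboot notifier is registered so the target
 * can switch to synchronous mode before an orderly reboot.
 */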
3109static void dm_integrity_resume(struct dm_target *ti)
3110{
3111        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3112        __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3113        int r;
3114
3115        DEBUG_print("resume\n");
3116
3117        if (ic->provided_data_sectors != old_provided_data_sectors) {
3118                if (ic->provided_data_sectors > old_provided_data_sectors &&
3119                    ic->mode == 'B' &&
3120                    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3121                        rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3122                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3123                        block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3124                                        ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3125                        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3126                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3127                }
3128
3129                ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3130                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3131                if (unlikely(r))
3132                        dm_integrity_io_error(ic, "writing superblock", r);
3133        }
3134
3135        if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3136                DEBUG_print("resume dirty_bitmap\n");
3137                rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3138                                   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3139                if (ic->mode == 'B') {
3140                        if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3141                            !ic->reset_recalculate_flag) {
3142                                block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3143                                block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3144                                if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3145                                                     BITMAP_OP_TEST_ALL_CLEAR)) {
3146                                        ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3147                                        ic->sb->recalc_sector = cpu_to_le64(0);
3148                                }
3149                        } else {
3150                                DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3151                                            ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3152                                ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3153                                block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3154                                block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3155                                block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3156                                rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3157                                                   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3158                                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3159                                ic->sb->recalc_sector = cpu_to_le64(0);
3160                        }
3161                } else {
3162                        if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3163                              block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
3164                            ic->reset_recalculate_flag) {
3165                                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3166                                ic->sb->recalc_sector = cpu_to_le64(0);
3167                        }
3168                        init_journal(ic, 0, ic->journal_sections, 0);
3169                        replay_journal(ic);
3170                        ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3171                }
3172                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3173                if (unlikely(r))
3174                        dm_integrity_io_error(ic, "writing superblock", r);
3175        } else {
3176                replay_journal(ic);
3177                if (ic->reset_recalculate_flag) {
3178                        ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3179                        ic->sb->recalc_sector = cpu_to_le64(0);
3180                }
3181                if (ic->mode == 'B') {
3182                        ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3183                        ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3184                        r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3185                        if (unlikely(r))
3186                                dm_integrity_io_error(ic, "writing superblock", r);
3187
3188                        block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3189                        block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3190                        block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3191                        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3192                            le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3193                                block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3194                                                ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3195                                block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3196                                                ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3197                                block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3198                                                ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3199                        }
3200                        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3201                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3202                }
3203        }
3204
3205        DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3206        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3207                __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3208                DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3209                if (recalc_pos < ic->provided_data_sectors) {
3210                        queue_work(ic->recalc_wq, &ic->recalc_work);
3211                } else if (recalc_pos > ic->provided_data_sectors) {
3212                        ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3213                        recalc_write_super(ic);
3214                }
3215        }
3216
3217        ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3218        ic->reboot_notifier.next = NULL;
3219        ic->reboot_notifier.priority = INT_MAX - 1;     /* be notified after md and before hardware drivers */
3220        WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3221
3222#if 0
3223        /* set to 1 to stress test synchronous mode */
3224        dm_integrity_enter_synchronous_mode(ic);
3225#endif
3226}
3227
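/*
 * STATUSTYPE_INFO reports "<mismatches> <provided_data_sectors> <recalc>",
 * where <recalc> is the current recalculation position or '-' if no
 * recalculation is in progress; e.g. "0 1953792 -" (illustrative values).
 * STATUSTYPE_TABLE reconstructs the constructor line; note that 'J' and
 * 'B' modes each contribute two optional arguments to arg_count.
 */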
3228static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3229                                unsigned status_flags, char *result, unsigned maxlen)
3230{
3231        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3232        unsigned arg_count;
3233        size_t sz = 0;
3234
3235        switch (type) {
3236        case STATUSTYPE_INFO:
3237                DMEMIT("%llu %llu",
3238                        (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3239                        ic->provided_data_sectors);
3240                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3241                        DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3242                else
3243                        DMEMIT(" -");
3244                break;
3245
3246        case STATUSTYPE_TABLE: {
3247                __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3248                watermark_percentage += ic->journal_entries / 2;
3249                do_div(watermark_percentage, ic->journal_entries);
3250                arg_count = 3;
3251                arg_count += !!ic->meta_dev;
3252                arg_count += ic->sectors_per_block != 1;
3253                arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3254                arg_count += ic->reset_recalculate_flag;
3255                arg_count += ic->discard;
3256                arg_count += ic->mode == 'J';
3257                arg_count += ic->mode == 'J';
3258                arg_count += ic->mode == 'B';
3259                arg_count += ic->mode == 'B';
3260                arg_count += !!ic->internal_hash_alg.alg_string;
3261                arg_count += !!ic->journal_crypt_alg.alg_string;
3262                arg_count += !!ic->journal_mac_alg.alg_string;
3263                arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3264                arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
3265                arg_count += ic->legacy_recalculate;
3266                DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3267                       ic->tag_size, ic->mode, arg_count);
3268                if (ic->meta_dev)
3269                        DMEMIT(" meta_device:%s", ic->meta_dev->name);
3270                if (ic->sectors_per_block != 1)
3271                        DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3272                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3273                        DMEMIT(" recalculate");
3274                if (ic->reset_recalculate_flag)
3275                        DMEMIT(" reset_recalculate");
3276                if (ic->discard)
3277                        DMEMIT(" allow_discards");
3278                DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3279                DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3280                DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3281                if (ic->mode == 'J') {
3282                        DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3283                        DMEMIT(" commit_time:%u", ic->autocommit_msec);
3284                }
3285                if (ic->mode == 'B') {
3286                        DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3287                        DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3288                }
3289                if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3290                        DMEMIT(" fix_padding");
3291                if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
3292                        DMEMIT(" fix_hmac");
3293                if (ic->legacy_recalculate)
3294                        DMEMIT(" legacy_recalculate");
3295
3296#define EMIT_ALG(a, n)                                                  \
3297                do {                                                    \
3298                        if (ic->a.alg_string) {                         \
3299                                DMEMIT(" %s:%s", n, ic->a.alg_string);  \
3300                                if (ic->a.key_string)                   \
3301                                        DMEMIT(":%s", ic->a.key_string);\
3302                        }                                               \
3303                } while (0)
3304                EMIT_ALG(internal_hash_alg, "internal_hash");
3305                EMIT_ALG(journal_crypt_alg, "journal_crypt");
3306                EMIT_ALG(journal_mac_alg, "journal_mac");
3307                break;
3308        }
3309        }
3310}
3311
3312static int dm_integrity_iterate_devices(struct dm_target *ti,
3313                                        iterate_devices_callout_fn fn, void *data)
3314{
3315        struct dm_integrity_c *ic = ti->private;
3316
3317        if (!ic->meta_dev)
3318                return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3319        else
3320                return fn(ti, ic->dev, 0, ti->len, data);
3321}
3322
3323static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3324{
3325        struct dm_integrity_c *ic = ti->private;
3326
3327        if (ic->sectors_per_block > 1) {
3328                limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3329                limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3330                blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3331        }
3332}
3333
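/*
 * Journal geometry. A 512-byte journal sector holds an 8-byte commit id
 * plus JOURNAL_SECTOR_DATA (512 - 8 = 504) bytes of entries; a section is
 * JOURNAL_BLOCK_SECTORS (8) such metadata sectors followed by the data
 * blocks they describe. Worked example (illustrative): with 512-byte
 * blocks and 4-byte crc32c tags, one entry takes roundup(8 + 8 + 4, 8) =
 * 24 bytes, so 504 / 24 = 21 entries fit per sector, and a section holds
 * 21 * 8 = 168 entries in 168 + 8 = 176 sectors.
 */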
3334static void calculate_journal_section_size(struct dm_integrity_c *ic)
3335{
3336        unsigned sector_space = JOURNAL_SECTOR_DATA;
3337
3338        ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3339        ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3340                                         JOURNAL_ENTRY_ROUNDUP);
3341
3342        if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3343                sector_space -= JOURNAL_MAC_PER_SECTOR;
3344        ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3345        ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3346        ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3347        ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3348}
3349
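/*
 * calculate_device_limits verifies that superblock, journal, metadata and
 * data all fit. In the interleaved layout the data device repeats
 * "metadata_run sectors of tags, then 2^log2_interleave_sectors sectors of
 * data"; log2_metadata_run caches the shift when metadata_run is a power
 * of two. With a separate metadata device the tags form one contiguous
 * run instead.
 */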
3350static int calculate_device_limits(struct dm_integrity_c *ic)
3351{
3352        __u64 initial_sectors;
3353
3354        calculate_journal_section_size(ic);
3355        initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3356        if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3357                return -EINVAL;
3358        ic->initial_sectors = initial_sectors;
3359
3360        if (!ic->meta_dev) {
3361                sector_t last_sector, last_area, last_offset;
3362
3363                /* we have to maintain excessive padding for compatibility with existing volumes */
3364                __u64 metadata_run_padding =
3365                        ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3366                        (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3367                        (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3368
3369                ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3370                                            metadata_run_padding) >> SECTOR_SHIFT;
3371                if (!(ic->metadata_run & (ic->metadata_run - 1)))
3372                        ic->log2_metadata_run = __ffs(ic->metadata_run);
3373                else
3374                        ic->log2_metadata_run = -1;
3375
3376                get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3377                last_sector = get_data_sector(ic, last_area, last_offset);
3378                if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3379                        return -EINVAL;
3380        } else {
3381                __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3382                meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3383                                >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3384                meta_size <<= ic->log2_buffer_sectors;
3385                if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3386                    ic->initial_sectors + meta_size > ic->meta_device_sectors)
3387                        return -EINVAL;
3388                ic->metadata_run = 1;
3389                ic->log2_metadata_run = 0;
3390        }
3391
3392        return 0;
3393}
3394
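/*
 * Without a separate metadata device, the usable data size is found by a
 * greedy bitwise search: starting from the top bit of the device size,
 * each bit is tentatively set in provided_data_sectors and kept only if
 * calculate_device_limits() still succeeds, yielding the largest data
 * size whose metadata and journal also fit on the device.
 */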
3395static void get_provided_data_sectors(struct dm_integrity_c *ic)
3396{
3397        if (!ic->meta_dev) {
3398                int test_bit;
3399                ic->provided_data_sectors = 0;
3400                for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3401                        __u64 prev_data_sectors = ic->provided_data_sectors;
3402
3403                        ic->provided_data_sectors |= (sector_t)1 << test_bit;
3404                        if (calculate_device_limits(ic))
3405                                ic->provided_data_sectors = prev_data_sectors;
3406                }
3407        } else {
3408                ic->provided_data_sectors = ic->data_device_sectors;
3409                ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3410        }
3411}
3412
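/*
 * initialize_superblock formats a newly created device: it fills in tag
 * size, block size and flags, sizes the journal (at least one section),
 * clamps interleaving to [MIN,MAX]_LOG2_INTERLEAVE_SECTORS and computes
 * provided_data_sectors. With a separate metadata device, the journal
 * size is instead maximized bit by bit under the device-limit check,
 * shrinking the buffer size if not even a single section fits.
 */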
3413static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3414{
3415        unsigned journal_sections;
3416        int test_bit;
3417
3418        memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3419        memcpy(ic->sb->magic, SB_MAGIC, 8);
3420        ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3421        ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3422        if (ic->journal_mac_alg.alg_string)
3423                ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3424
3425        calculate_journal_section_size(ic);
3426        journal_sections = journal_sectors / ic->journal_section_sectors;
3427        if (!journal_sections)
3428                journal_sections = 1;
3429
3430        if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
3431                ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
3432                get_random_bytes(ic->sb->salt, SALT_SIZE);
3433        }
3434
3435        if (!ic->meta_dev) {
3436                if (ic->fix_padding)
3437                        ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3438                ic->sb->journal_sections = cpu_to_le32(journal_sections);
3439                if (!interleave_sectors)
3440                        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3441                ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3442                ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3443                ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3444
3445                get_provided_data_sectors(ic);
3446                if (!ic->provided_data_sectors)
3447                        return -EINVAL;
3448        } else {
3449                ic->sb->log2_interleave_sectors = 0;
3450
3451                get_provided_data_sectors(ic);
3452                if (!ic->provided_data_sectors)
3453                        return -EINVAL;
3454
3455try_smaller_buffer:
3456                ic->sb->journal_sections = cpu_to_le32(0);
3457                for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3458                        __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3459                        __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3460                        if (test_journal_sections > journal_sections)
3461                                continue;
3462                        ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3463                        if (calculate_device_limits(ic))
3464                                ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3465
3466                }
3467                if (!le32_to_cpu(ic->sb->journal_sections)) {
3468                        if (ic->log2_buffer_sectors > 3) {
3469                                ic->log2_buffer_sectors--;
3470                                goto try_smaller_buffer;
3471                        }
3472                        return -EINVAL;
3473                }
3474        }
3475
3476        ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3477
3478        sb_set_version(ic);
3479
3480        return 0;
3481}
3482
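/*
 * dm_integrity_set registers a blk_integrity profile for the mapped
 * device, exposing tag_size bytes of integrity metadata per block so that
 * upper layers can attach and verify tags through the bio integrity API.
 */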
3483static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3484{
3485        struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3486        struct blk_integrity bi;
3487
3488        memset(&bi, 0, sizeof(bi));
3489        bi.profile = &dm_integrity_profile;
3490        bi.tuple_size = ic->tag_size;
3491        bi.tag_size = bi.tuple_size;
3492        bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3493
3494        blk_integrity_register(disk, &bi);
3495        blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3496}
3497
3498static void dm_integrity_free_page_list(struct page_list *pl)
3499{
3500        unsigned i;
3501
3502        if (!pl)
3503                return;
3504        for (i = 0; pl[i].page; i++)
3505                __free_page(pl[i].page);
3506        kvfree(pl);
3507}
3508
3509static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3510{
3511        struct page_list *pl;
3512        unsigned i;
3513
3514        pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3515        if (!pl)
3516                return NULL;
3517
3518        for (i = 0; i < n_pages; i++) {
3519                pl[i].page = alloc_page(GFP_KERNEL);
3520                if (!pl[i].page) {
3521                        dm_integrity_free_page_list(pl);
3522                        return NULL;
3523                }
3524                if (i)
3525                        pl[i - 1].next = &pl[i];
3526        }
3527        pl[i].page = NULL;
3528        pl[i].next = NULL;
3529
3530        return pl;
3531}
3532
3533static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3534{
3535        unsigned i;
3536        for (i = 0; i < ic->journal_sections; i++)
3537                kvfree(sl[i]);
3538        kvfree(sl);
3539}
3540
3541static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3542                                                                   struct page_list *pl)
3543{
3544        struct scatterlist **sl;
3545        unsigned i;
3546
3547        sl = kvmalloc_array(ic->journal_sections,
3548                            sizeof(struct scatterlist *),
3549                            GFP_KERNEL | __GFP_ZERO);
3550        if (!sl)
3551                return NULL;
3552
3553        for (i = 0; i < ic->journal_sections; i++) {
3554                struct scatterlist *s;
3555                unsigned start_index, start_offset;
3556                unsigned end_index, end_offset;
3557                unsigned n_pages;
3558                unsigned idx;
3559
3560                page_list_location(ic, i, 0, &start_index, &start_offset);
3561                page_list_location(ic, i, ic->journal_section_sectors - 1,
3562                                   &end_index, &end_offset);
3563
3564                n_pages = (end_index - start_index + 1);
3565
3566                s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3567                                   GFP_KERNEL);
3568                if (!s) {
3569                        dm_integrity_free_journal_scatterlist(ic, sl);
3570                        return NULL;
3571                }
3572
3573                sg_init_table(s, n_pages);
3574                for (idx = start_index; idx <= end_index; idx++) {
3575                        char *va = lowmem_page_address(pl[idx].page);
3576                        unsigned start = 0, end = PAGE_SIZE;
3577                        if (idx == start_index)
3578                                start = start_offset;
3579                        if (idx == end_index)
3580                                end = end_offset + (1 << SECTOR_SHIFT);
3581                        sg_set_buf(&s[idx - start_index], va + start, end - start);
3582                }
3583
3584                sl[i] = s;
3585        }
3586
3587        return sl;
3588}
3589
3590static void free_alg(struct alg_spec *a)
3591{
3592        kfree_sensitive(a->alg_string);
3593        kfree_sensitive(a->key);
3594        memset(a, 0, sizeof *a);
3595}
3596
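/*
 * Parses a "name:algorithm[:key]" table argument, for example
 * "internal_hash:hmac(sha256):0123456789abcdef" (illustrative key). The
 * hex key, when present, must have an even number of digits and is
 * decoded into a->key.
 */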
3597static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3598{
3599        char *k;
3600
3601        free_alg(a);
3602
3603        a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3604        if (!a->alg_string)
3605                goto nomem;
3606
3607        k = strchr(a->alg_string, ':');
3608        if (k) {
3609                *k = 0;
3610                a->key_string = k + 1;
3611                if (strlen(a->key_string) & 1)
3612                        goto inval;
3613
3614                a->key_size = strlen(a->key_string) / 2;
3615                a->key = kmalloc(a->key_size, GFP_KERNEL);
3616                if (!a->key)
3617                        goto nomem;
3618                if (hex2bin(a->key, a->key_string, a->key_size))
3619                        goto inval;
3620        }
3621
3622        return 0;
3623inval:
3624        *error = error_inval;
3625        return -EINVAL;
3626nomem:
3627        *error = "Out of memory for an argument";
3628        return -ENOMEM;
3629}
3630
3631static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3632                   char *error_alg, char *error_key)
3633{
3634        int r;
3635
3636        if (a->alg_string) {
3637                *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3638                if (IS_ERR(*hash)) {
3639                        *error = error_alg;
3640                        r = PTR_ERR(*hash);
3641                        *hash = NULL;
3642                        return r;
3643                }
3644
3645                if (a->key) {
3646                        r = crypto_shash_setkey(*hash, a->key, a->key_size);
3647                        if (r) {
3648                                *error = error_key;
3649                                return r;
3650                        }
3651                } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3652                        *error = error_key;
3653                        return -ENOKEY;
3654                }
3655        }
3656
3657        return 0;
3658}
3659
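/*
 * create_journal allocates the in-memory journal and sets up optional
 * journal encryption. Two schemes exist: for ciphers with block size 1
 * (stream ciphers), a keystream the size of the whole journal is generated
 * once into journal_xor (the commit ids are encrypted along with it) and
 * the cipher is then freed, so journal encryption degenerates to a XOR;
 * for block ciphers, a per-section IV is derived by encrypting the section
 * number, and one preallocated skcipher request per section
 * (sk_requests[i]) encrypts whole sections between the journal and
 * journal_io page lists.
 */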
3660static int create_journal(struct dm_integrity_c *ic, char **error)
3661{
3662        int r = 0;
3663        unsigned i;
3664        __u64 journal_pages, journal_desc_size, journal_tree_size;
3665        unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3666        struct skcipher_request *req = NULL;
3667
3668        ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3669        ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3670        ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3671        ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3672
3673        journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3674                                PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3675        journal_desc_size = journal_pages * sizeof(struct page_list);
3676        if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3677                *error = "Journal doesn't fit into memory";
3678                r = -ENOMEM;
3679                goto bad;
3680        }
3681        ic->journal_pages = journal_pages;
3682
3683        ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3684        if (!ic->journal) {
3685                *error = "Could not allocate memory for journal";
3686                r = -ENOMEM;
3687                goto bad;
3688        }
3689        if (ic->journal_crypt_alg.alg_string) {
3690                unsigned ivsize, blocksize;
3691                struct journal_completion comp;
3692
3693                comp.ic = ic;
3694                ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3695                if (IS_ERR(ic->journal_crypt)) {
3696                        *error = "Invalid journal cipher";
3697                        r = PTR_ERR(ic->journal_crypt);
3698                        ic->journal_crypt = NULL;
3699                        goto bad;
3700                }
3701                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3702                blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3703
3704                if (ic->journal_crypt_alg.key) {
3705                        r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3706                                                   ic->journal_crypt_alg.key_size);
3707                        if (r) {
3708                                *error = "Error setting encryption key";
3709                                goto bad;
3710                        }
3711                }
3712                DEBUG_print("cipher %s, block size %u iv size %u\n",
3713                            ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3714
3715                ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3716                if (!ic->journal_io) {
3717                        *error = "Could not allocate memory for journal io";
3718                        r = -ENOMEM;
3719                        goto bad;
3720                }
3721
3722                if (blocksize == 1) {
3723                        struct scatterlist *sg;
3724
3725                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3726                        if (!req) {
3727                                *error = "Could not allocate crypt request";
3728                                r = -ENOMEM;
3729                                goto bad;
3730                        }
3731
3732                        crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3733                        if (!crypt_iv) {
3734                                *error = "Could not allocate iv";
3735                                r = -ENOMEM;
3736                                goto bad;
3737                        }
3738
3739                        ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3740                        if (!ic->journal_xor) {
3741                                *error = "Could not allocate memory for journal xor";
3742                                r = -ENOMEM;
3743                                goto bad;
3744                        }
3745
3746                        sg = kvmalloc_array(ic->journal_pages + 1,
3747                                            sizeof(struct scatterlist),
3748                                            GFP_KERNEL);
3749                        if (!sg) {
3750                                *error = "Unable to allocate sg list";
3751                                r = -ENOMEM;
3752                                goto bad;
3753                        }
3754                        sg_init_table(sg, ic->journal_pages + 1);
3755                        for (i = 0; i < ic->journal_pages; i++) {
3756                                char *va = lowmem_page_address(ic->journal_xor[i].page);
3757                                clear_page(va);
3758                                sg_set_buf(&sg[i], va, PAGE_SIZE);
3759                        }
3760                        sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3761
3762                        skcipher_request_set_crypt(req, sg, sg,
3763                                                   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3764                        init_completion(&comp.comp);
3765                        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3766                        if (do_crypt(true, req, &comp))
3767                                wait_for_completion(&comp.comp);
3768                        kvfree(sg);
3769                        r = dm_integrity_failed(ic);
3770                        if (r) {
3771                                *error = "Unable to encrypt journal";
3772                                goto bad;
3773                        }
3774                        DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3775
3776                        crypto_free_skcipher(ic->journal_crypt);
3777                        ic->journal_crypt = NULL;
3778                } else {
3779                        unsigned crypt_len = roundup(ivsize, blocksize);
3780
3781                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3782                        if (!req) {
3783                                *error = "Could not allocate crypt request";
3784                                r = -ENOMEM;
3785                                goto bad;
3786                        }
3787
3788                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3789                        if (!crypt_iv) {
3790                                *error = "Could not allocate iv";
3791                                r = -ENOMEM;
3792                                goto bad;
3793                        }
3794
3795                        crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3796                        if (!crypt_data) {
3797                                *error = "Unable to allocate crypt data";
3798                                r = -ENOMEM;
3799                                goto bad;
3800                        }
3801
3802                        ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3803                        if (!ic->journal_scatterlist) {
3804                                *error = "Unable to allocate sg list";
3805                                r = -ENOMEM;
3806                                goto bad;
3807                        }
3808                        ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3809                        if (!ic->journal_io_scatterlist) {
3810                                *error = "Unable to allocate sg list";
3811                                r = -ENOMEM;
3812                                goto bad;
3813                        }
3814                        ic->sk_requests = kvmalloc_array(ic->journal_sections,
3815                                                         sizeof(struct skcipher_request *),
3816                                                         GFP_KERNEL | __GFP_ZERO);
3817                        if (!ic->sk_requests) {
3818                                *error = "Unable to allocate sk requests";
3819                                r = -ENOMEM;
3820                                goto bad;
3821                        }
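                            /*
                             * Precompute one request per journal section: encrypt the
                             * little-endian section number to derive that section's IV
                             * and stash it in a reusable skcipher request sized for the
                             * whole section.
                             */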
3822                        for (i = 0; i < ic->journal_sections; i++) {
3823                                struct scatterlist sg;
3824                                struct skcipher_request *section_req;
3825                                __le32 section_le = cpu_to_le32(i);
3826
3827                                memset(crypt_iv, 0x00, ivsize);
3828                                memset(crypt_data, 0x00, crypt_len);
3829                                memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3830
3831                                sg_init_one(&sg, crypt_data, crypt_len);
3832                                skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3833                                init_completion(&comp.comp);
3834                                comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3835                                if (do_crypt(true, req, &comp))
3836                                        wait_for_completion(&comp.comp);
3837
3838                                r = dm_integrity_failed(ic);
3839                                if (r) {
3840                                        *error = "Unable to generate iv";
3841                                        goto bad;
3842                                }
3843
3844                                section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3845                                if (!section_req) {
3846                                        *error = "Unable to allocate crypt request";
3847                                        r = -ENOMEM;
3848                                        goto bad;
3849                                }
3850                                section_req->iv = kmalloc_array(ivsize, 2,
3851                                                                GFP_KERNEL);
3852                                if (!section_req->iv) {
3853                                        skcipher_request_free(section_req);
3854                                        *error = "Unable to allocate iv";
3855                                        r = -ENOMEM;
3856                                        goto bad;
3857                                }
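                                    /*
                                     * The IV buffer is twice ivsize: the second half
                                     * (filled below) keeps a pristine copy of the derived
                                     * IV, presumably so it can be restored after the
                                     * cipher updates the working first half in place.
                                     */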
3858                                memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3859                                section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3860                                ic->sk_requests[i] = section_req;
3861                                DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3862                        }
3863                }
3864        }
3865
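            /*
             * Make the commit ids pairwise distinct, bumping a duplicate and
             * re-checking; duplicate ids could make a stale journal section
             * look committed.
             */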
3866        for (i = 0; i < N_COMMIT_IDS; i++) {
3867                unsigned j;
3868retest_commit_id:
3869                for (j = 0; j < i; j++) {
3870                        if (ic->commit_ids[j] == ic->commit_ids[i]) {
3871                                ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3872                                goto retest_commit_id;
3873                        }
3874                }
3875                DEBUG_print("commit id %u: %016llx\n", i, le64_to_cpu(ic->commit_ids[i]));
3876        }
3877
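            /*
             * One journal_node per journal entry: the tree lets lookups find
             * the latest in-journal data for a given sector.
             */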
3878        journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3879        if (journal_tree_size > ULONG_MAX) {
3880                *error = "Journal doesn't fit into memory";
3881                r = -ENOMEM;
3882                goto bad;
3883        }
3884        ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3885        if (!ic->journal_tree) {
3886                *error = "Could not allocate memory for journal tree";
3887                r = -ENOMEM;
3888        }
3889bad:
3890        kfree(crypt_data);
3891        kfree(crypt_iv);
3892        skcipher_request_free(req);
3893
3894        return r;
3895}
3896
3897/*
3898 * Construct an integrity mapping
3899 *
3900 * Arguments:
3901 *      device
3902 *      offset from the start of the device
3903 *      tag size
3904 *      D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3905 *      number of optional arguments
3906 *      optional arguments:
3907 *              journal_sectors
3908 *              interleave_sectors
3909 *              buffer_sectors
3910 *              journal_watermark
3911 *              commit_time
3912 *              meta_device
3913 *              block_size
3914 *              sectors_per_bit
3915 *              bitmap_flush_interval
3916 *              internal_hash
3917 *              journal_crypt
3918 *              journal_mac
3919 *              recalculate
     *              reset_recalculate
     *              allow_discards
     *              fix_padding
     *              fix_hmac
     *              legacy_recalculate
3920 */
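    /*
     * Illustrative mapping table line (a sketch only: the device path and
     * sizes are hypothetical and must match the actual device):
     *
     *      0 409600 integrity /dev/sdb 0 - J 1 internal_hash:crc32c
     *
     * Passing "-" as the tag size lets it default to the digest size of
     * internal_hash (see below).
     */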
3921static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3922{
3923        struct dm_integrity_c *ic;
3924        char dummy;
3925        int r;
3926        unsigned extra_args;
3927        struct dm_arg_set as;
3928        static const struct dm_arg _args[] = {
3929                {0, 18, "Invalid number of feature args"},
3930        };
3931        unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3932        bool should_write_sb;
3933        __u64 threshold;
3934        unsigned long long start;
3935        __s8 log2_sectors_per_bitmap_bit = -1;
3936        __s8 log2_blocks_per_bitmap_bit;
3937        __u64 bits_in_journal;
3938        __u64 n_bitmap_bits;
3939
3940#define DIRECT_ARGUMENTS        4
3941
3942        if (argc <= DIRECT_ARGUMENTS) {
3943                ti->error = "Invalid argument count";
3944                return -EINVAL;
3945        }
3946
3947        ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3948        if (!ic) {
3949                ti->error = "Cannot allocate integrity context";
3950                return -ENOMEM;
3951        }
3952        ti->private = ic;
3953        ti->per_io_data_size = sizeof(struct dm_integrity_io);
3954        ic->ti = ti;
3955
3956        ic->in_progress = RB_ROOT;
3957        INIT_LIST_HEAD(&ic->wait_list);
3958        init_waitqueue_head(&ic->endio_wait);
3959        bio_list_init(&ic->flush_bio_list);
3960        init_waitqueue_head(&ic->copy_to_journal_wait);
3961        init_completion(&ic->crypto_backoff);
3962        atomic64_set(&ic->number_of_mismatches, 0);
3963        ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3964
3965        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3966        if (r) {
3967                ti->error = "Device lookup failed";
3968                goto bad;
3969        }
3970
3971        if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3972                ti->error = "Invalid starting offset";
3973                r = -EINVAL;
3974                goto bad;
3975        }
3976        ic->start = start;
3977
3978        if (strcmp(argv[2], "-")) {
3979                if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3980                        ti->error = "Invalid tag size";
3981                        r = -EINVAL;
3982                        goto bad;
3983                }
3984        }
3985
3986        if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
3987            !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
3988                ic->mode = argv[3][0];
3989        } else {
3990                ti->error = "Invalid mode (expecting J, B, D, R)";
3991                r = -EINVAL;
3992                goto bad;
3993        }
3994
3995        journal_sectors = 0;
3996        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3997        buffer_sectors = DEFAULT_BUFFER_SECTORS;
3998        journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3999        sync_msec = DEFAULT_SYNC_MSEC;
4000        ic->sectors_per_block = 1;
4001
4002        as.argc = argc - DIRECT_ARGUMENTS;
4003        as.argv = argv + DIRECT_ARGUMENTS;
4004        r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
4005        if (r)
4006                goto bad;
4007
4008        while (extra_args--) {
4009                const char *opt_string;
4010                unsigned val;
4011                unsigned long long llval;

4012                opt_string = dm_shift_arg(&as);
4013                if (!opt_string) {
4014                        r = -EINVAL;
4015                        ti->error = "Not enough feature arguments";
4016                        goto bad;
4017                }
4018                if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
4019                        journal_sectors = val ? val : 1;
4020                else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
4021                        interleave_sectors = val;
4022                else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
4023                        buffer_sectors = val;
4024                else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
4025                        journal_watermark = val;
4026                else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
4027                        sync_msec = val;
4028                else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
4029                        if (ic->meta_dev) {
4030                                dm_put_device(ti, ic->meta_dev);
4031                                ic->meta_dev = NULL;
4032                        }
4033                        r = dm_get_device(ti, strchr(opt_string, ':') + 1,
4034                                          dm_table_get_mode(ti->table), &ic->meta_dev);
4035                        if (r) {
4036                                ti->error = "Device lookup failed";
4037                                goto bad;
4038                        }
4039                } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
4040                        if (val < 1 << SECTOR_SHIFT ||
4041                            val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
4042                            (val & (val - 1))) {
4043                                r = -EINVAL;
4044                                ti->error = "Invalid block_size argument";
4045                                goto bad;
4046                        }
4047                        ic->sectors_per_block = val >> SECTOR_SHIFT;
4048                } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
4049                        log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
4050                } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
4051                        if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
4052                                r = -EINVAL;
4053                                ti->error = "Invalid bitmap_flush_interval argument";
4054                                goto bad;
4055                        }
4056                        ic->bitmap_flush_interval = msecs_to_jiffies(val);
4057                } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
4058                        r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
4059                                            "Invalid internal_hash argument");
4060                        if (r)
4061                                goto bad;
4062                } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
4063                        r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
4064                                            "Invalid journal_crypt argument");
4065                        if (r)
4066                                goto bad;
4067                } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
4068                        r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
4069                                            "Invalid journal_mac argument");
4070                        if (r)
4071                                goto bad;
4072                } else if (!strcmp(opt_string, "recalculate")) {
4073                        ic->recalculate_flag = true;
4074                } else if (!strcmp(opt_string, "reset_recalculate")) {
4075                        ic->recalculate_flag = true;
4076                        ic->reset_recalculate_flag = true;
4077                } else if (!strcmp(opt_string, "allow_discards")) {
4078                        ic->discard = true;
4079                } else if (!strcmp(opt_string, "fix_padding")) {
4080                        ic->fix_padding = true;
4081                } else if (!strcmp(opt_string, "fix_hmac")) {
4082                        ic->fix_hmac = true;
4083                } else if (!strcmp(opt_string, "legacy_recalculate")) {
4084                        ic->legacy_recalculate = true;
4085                } else {
4086                        r = -EINVAL;
4087                        ti->error = "Invalid argument";
4088                        goto bad;
4089                }
4090        }
4091
4092        ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
4093        if (!ic->meta_dev)
4094                ic->meta_device_sectors = ic->data_device_sectors;
4095        else
4096                ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
4097
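            /*
             * Default journal size: data device size / 128 (i.e. >>
             * DEFAULT_JOURNAL_SIZE_FACTOR), capped at DEFAULT_MAX_JOURNAL_SECTORS.
             */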
4098        if (!journal_sectors) {
4099                journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
4100                                      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
4101        }
4102
4103        if (!buffer_sectors)
4104                buffer_sectors = 1;
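            /*
             * Round down to a power of two, capped so that the buffer size in
             * bytes (1 << (log2_buffer_sectors + SECTOR_SHIFT)) fits in 32 bits.
             */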
4105        ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
4106
4107        r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
4108                    "Invalid internal hash", "Error setting internal hash key");
4109        if (r)
4110                goto bad;
4111
4112        r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
4113                    "Invalid journal mac", "Error setting journal mac key");
4114        if (r)
4115                goto bad;
4116
4117        if (!ic->tag_size) {
4118                if (!ic->internal_hash) {
4119                        ti->error = "Unknown tag size";
4120                        r = -EINVAL;
4121                        goto bad;
4122                }
4123                ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
4124        }
4125        if (ic->tag_size > MAX_TAG_SIZE) {
4126                ti->error = "Tag size is too large";
4127                r = -EINVAL;
4128                goto bad;
4129        }
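            /*
             * Remember log2 of a power-of-two tag size, likely so offset math
             * can use shifts; -1 marks the generic case.
             */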
4130        if (!(ic->tag_size & (ic->tag_size - 1)))
4131                ic->log2_tag_size = __ffs(ic->tag_size);
4132        else
4133                ic->log2_tag_size = -1;
4134
4135        if (ic->mode == 'B' && !ic->internal_hash) {
4136                r = -EINVAL;
4137                ti->error = "Bitmap mode can only be used with internal hash";
4138                goto bad;
4139        }
4140
4141        if (ic->discard && !ic->internal_hash) {
4142                r = -EINVAL;
4143                ti->error = "Discard can only be used with internal hash";
4144                goto bad;
4145        }
4146
4147        ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4148        ic->autocommit_msec = sync_msec;
4149        timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4150
4151        ic->io = dm_io_client_create();
4152        if (IS_ERR(ic->io)) {
4153                r = PTR_ERR(ic->io);
4154                ic->io = NULL;
4155                ti->error = "Cannot allocate dm io";
4156                goto bad;
4157        }
4158
4159        r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4160        if (r) {
4161                ti->error = "Cannot allocate mempool";
4162                goto bad;
4163        }
4164
4165        ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4166                                          WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
4167        if (!ic->metadata_wq) {
4168                ti->error = "Cannot allocate workqueue";
4169                r = -ENOMEM;
4170                goto bad;
4171        }
4172
4173        /*
4174         * If this workqueue were percpu, it would cause bio reordering
4175         * and reduced performance.
4176         */
4177        ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4178        if (!ic->wait_wq) {
4179                ti->error = "Cannot allocate workqueue";
4180                r = -ENOMEM;
4181                goto bad;
4182        }
4183
4184        ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4185                                          METADATA_WORKQUEUE_MAX_ACTIVE);
4186        if (!ic->offload_wq) {
4187                ti->error = "Cannot allocate workqueue";
4188                r = -ENOMEM;
4189                goto bad;
4190        }
4191
4192        ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4193        if (!ic->commit_wq) {
4194                ti->error = "Cannot allocate workqueue";
4195                r = -ENOMEM;
4196                goto bad;
4197        }
4198        INIT_WORK(&ic->commit_work, integrity_commit);
4199
4200        if (ic->mode == 'J' || ic->mode == 'B') {
4201                ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4202                if (!ic->writer_wq) {
4203                        ti->error = "Cannot allocate workqueue";
4204                        r = -ENOMEM;
4205                        goto bad;
4206                }
4207                INIT_WORK(&ic->writer_work, integrity_writer);
4208        }
4209
4210        ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4211        if (!ic->sb) {
4212                r = -ENOMEM;
4213                ti->error = "Cannot allocate superblock area";
4214                goto bad;
4215        }
4216
4217        r = sync_rw_sb(ic, REQ_OP_READ, 0);
4218        if (r) {
4219                ti->error = "Error reading superblock";
4220                goto bad;
4221        }
4222        should_write_sb = false;
4223        if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4224                if (ic->mode != 'R') {
4225                        if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4226                                r = -EINVAL;
4227                                ti->error = "The device is not initialized";
4228                                goto bad;
4229                        }
4230                }
4231
4232                r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4233                if (r) {
4234                        ti->error = "Could not initialize superblock";
4235                        goto bad;
4236                }
4237                if (ic->mode != 'R')
4238                        should_write_sb = true;
4239        }
4240
4241        if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
4242                r = -EINVAL;
4243                ti->error = "Unknown version";
4244                goto bad;
4245        }
4246        if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4247                r = -EINVAL;
4248                ti->error = "Tag size doesn't match the information in superblock";
4249                goto bad;
4250        }
4251        if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4252                r = -EINVAL;
4253                ti->error = "Block size doesn't match the information in superblock";
4254                goto bad;
4255        }
4256        if (!le32_to_cpu(ic->sb->journal_sections)) {
4257                r = -EINVAL;
4258                ti->error = "Corrupted superblock, journal_sections is 0";
4259                goto bad;
4260        }
4261        /* make sure that ti->max_io_len doesn't overflow */
4262        if (!ic->meta_dev) {
4263                if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4264                    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4265                        r = -EINVAL;
4266                        ti->error = "Invalid interleave_sectors in the superblock";
4267                        goto bad;
4268                }
4269        } else {
4270                if (ic->sb->log2_interleave_sectors) {
4271                        r = -EINVAL;
4272                        ti->error = "Invalid interleave_sectors in the superblock";
4273                        goto bad;
4274                }
4275        }
4276        if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4277                r = -EINVAL;
4278                ti->error = "Journal mac mismatch";
4279                goto bad;
4280        }
4281
4282        get_provided_data_sectors(ic);
4283        if (!ic->provided_data_sectors) {
4284                r = -EINVAL;
4285                ti->error = "The device is too small";
4286                goto bad;
4287        }
4288
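    /*
     * If the limits don't fit and metadata lives on a separate device, try
     * halving the buffer (down to 8 sectors) before giving up.
     */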
4289try_smaller_buffer:
4290        r = calculate_device_limits(ic);
4291        if (r) {
4292                if (ic->meta_dev) {
4293                        if (ic->log2_buffer_sectors > 3) {
4294                                ic->log2_buffer_sectors--;
4295                                goto try_smaller_buffer;
4296                        }
4297                }
4298                ti->error = "The device is too small";
4299                goto bad;
4300        }
4301
4302        if (log2_sectors_per_bitmap_bit < 0)
4303                log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4304        if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4305                log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4306
4307        bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4308        if (bits_in_journal > UINT_MAX)
4309                bits_in_journal = UINT_MAX;
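            /*
             * Coarsen the granularity until one bit per region makes the whole
             * bitmap fit into the journal area.
             */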
4310        while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4311                log2_sectors_per_bitmap_bit++;
4312
4313        log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4314        ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4315        if (should_write_sb) {
4316                ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4317        }
4318        n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4319                                + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4320        ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
4321
4322        if (!ic->meta_dev)
4323                ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4324
4325        if (ti->len > ic->provided_data_sectors) {
4326                r = -EINVAL;
4327                ti->error = "Not enough provided sectors for requested mapping size";
4328                goto bad;
4329        }
4330
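            /*
             * threshold = journal_entries * (100 - journal_watermark) / 100,
             * rounded to nearest by the +50: e.g. 10240 entries at the default
             * watermark of 50 give a threshold of 5120 free entries.
             */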
4332        threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4333        threshold += 50;
4334        do_div(threshold, 100);
4335        ic->free_sectors_threshold = threshold;
4336
4337        DEBUG_print("initialized:\n");
4338        DEBUG_print("   integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4339        DEBUG_print("   journal_entry_size %u\n", ic->journal_entry_size);
4340        DEBUG_print("   journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4341        DEBUG_print("   journal_section_entries %u\n", ic->journal_section_entries);
4342        DEBUG_print("   journal_section_sectors %u\n", ic->journal_section_sectors);
4343        DEBUG_print("   journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
4344        DEBUG_print("   journal_entries %u\n", ic->journal_entries);
4345        DEBUG_print("   log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4346        DEBUG_print("   data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
4347        DEBUG_print("   initial_sectors 0x%x\n", ic->initial_sectors);
4348        DEBUG_print("   metadata_run 0x%x\n", ic->metadata_run);
4349        DEBUG_print("   log2_metadata_run %d\n", ic->log2_metadata_run);
4350        DEBUG_print("   provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4351        DEBUG_print("   log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4352        DEBUG_print("   bits_in_journal %llu\n", bits_in_journal);
4353
4354        if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4355                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4356                ic->sb->recalc_sector = cpu_to_le64(0);
4357        }
4358
4359        if (ic->internal_hash) {
4360                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4361                if (!ic->recalc_wq) {
4362                        ti->error = "Cannot allocate workqueue";
4363                        r = -ENOMEM;
4364                        goto bad;
4365                }
4366                INIT_WORK(&ic->recalc_work, integrity_recalc);
4367                ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
4368                if (!ic->recalc_buffer) {
4369                        ti->error = "Cannot allocate buffer for recalculating";
4370                        r = -ENOMEM;
4371                        goto bad;
4372                }
4373                ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
4374                                                 ic->tag_size, GFP_KERNEL);
4375                if (!ic->recalc_tags) {
4376                        ti->error = "Cannot allocate tags for recalculating";
4377                        r = -ENOMEM;
4378                        goto bad;
4379                }
4380        } else {
4381                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
4382                        ti->error = "Recalculate can only be specified with internal_hash";
4383                        r = -EINVAL;
4384                        goto bad;
4385                }
4386        }
4387
4388        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
4389            le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
4390            dm_integrity_disable_recalculate(ic)) {
4391                ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
4392                r = -EOPNOTSUPP;
4393                goto bad;
4394        }
4395
4396        ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4397                        1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
4398        if (IS_ERR(ic->bufio)) {
4399                r = PTR_ERR(ic->bufio);
4400                ti->error = "Cannot initialize dm-bufio";
4401                ic->bufio = NULL;
4402                goto bad;
4403        }
4404        dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4405
4406        if (ic->mode != 'R') {
4407                r = create_journal(ic, &ti->error);
4408                if (r)
4409                        goto bad;
4411        }
4412
4413        if (ic->mode == 'B') {
4414                unsigned i;
4415                unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4416
4417                ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4418                if (!ic->recalc_bitmap) {
4419                        r = -ENOMEM;
4420                        goto bad;
4421                }
4422                ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4423                if (!ic->may_write_bitmap) {
4424                        r = -ENOMEM;
4425                        goto bad;
4426                }
4427                ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4428                if (!ic->bbs) {
4429                        r = -ENOMEM;
4430                        goto bad;
4431                }
4432                INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4433                for (i = 0; i < ic->n_bitmap_blocks; i++) {
4434                        struct bitmap_block_status *bbs = &ic->bbs[i];
4435                        unsigned sector, pl_index, pl_offset;
4436
4437                        INIT_WORK(&bbs->work, bitmap_block_work);
4438                        bbs->ic = ic;
4439                        bbs->idx = i;
4440                        bio_list_init(&bbs->bio_queue);
4441                        spin_lock_init(&bbs->bio_queue_lock);
4442
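                            /*
                             * In bitmap mode the journal page list holds the bitmap;
                             * locate block i's page and offset within it.
                             */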
4443                        sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4444                        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4445                        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4446
4447                        bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4448                }
4449        }
4450
4451        if (should_write_sb) {
4454                init_journal(ic, 0, ic->journal_sections, 0);
4455                r = dm_integrity_failed(ic);
4456                if (unlikely(r)) {
4457                        ti->error = "Error initializing journal";
4458                        goto bad;
4459                }
4460                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4461                if (r) {
4462                        ti->error = "Error initializing superblock";
4463                        goto bad;
4464                }
4465                ic->just_formatted = true;
4466        }
4467
4468        if (!ic->meta_dev) {
4469                r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4470                if (r)
4471                        goto bad;
4472        }
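            /*
             * Cap bio size so a single bio spans at most the data covered by
             * one bitmap block (BITMAP_BLOCK_SIZE * 8 bits).
             */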
4473        if (ic->mode == 'B') {
4474                unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4475                if (!max_io_len)
4476                        max_io_len = 1U << 31;
4477                DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4478                if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4479                        r = dm_set_target_max_io_len(ti, max_io_len);
4480                        if (r)
4481                                goto bad;
4482                }
4483        }
4484
4485        if (!ic->internal_hash)
4486                dm_integrity_set(ti, ic);
4487
4488        ti->num_flush_bios = 1;
4489        ti->flush_supported = true;
4490        if (ic->discard)
4491                ti->num_discard_bios = 1;
4492
4493        return 0;
4494
4495bad:
4496        dm_integrity_dtr(ti);
4497        return r;
4498}
4499
4500static void dm_integrity_dtr(struct dm_target *ti)
4501{
4502        struct dm_integrity_c *ic = ti->private;
4503
4504        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4505        BUG_ON(!list_empty(&ic->wait_list));
4506
4507        if (ic->metadata_wq)
4508                destroy_workqueue(ic->metadata_wq);
4509        if (ic->wait_wq)
4510                destroy_workqueue(ic->wait_wq);
4511        if (ic->offload_wq)
4512                destroy_workqueue(ic->offload_wq);
4513        if (ic->commit_wq)
4514                destroy_workqueue(ic->commit_wq);
4515        if (ic->writer_wq)
4516                destroy_workqueue(ic->writer_wq);
4517        if (ic->recalc_wq)
4518                destroy_workqueue(ic->recalc_wq);
4519        vfree(ic->recalc_buffer);
4520        kvfree(ic->recalc_tags);
4521        kvfree(ic->bbs);
4522        if (ic->bufio)
4523                dm_bufio_client_destroy(ic->bufio);
4524        mempool_exit(&ic->journal_io_mempool);
4525        if (ic->io)
4526                dm_io_client_destroy(ic->io);
4527        if (ic->dev)
4528                dm_put_device(ti, ic->dev);
4529        if (ic->meta_dev)
4530                dm_put_device(ti, ic->meta_dev);
4531        dm_integrity_free_page_list(ic->journal);
4532        dm_integrity_free_page_list(ic->journal_io);
4533        dm_integrity_free_page_list(ic->journal_xor);
4534        dm_integrity_free_page_list(ic->recalc_bitmap);
4535        dm_integrity_free_page_list(ic->may_write_bitmap);
4536        if (ic->journal_scatterlist)
4537                dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4538        if (ic->journal_io_scatterlist)
4539                dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4540        if (ic->sk_requests) {
4541                unsigned i;
4542
4543                for (i = 0; i < ic->journal_sections; i++) {
4544                        struct skcipher_request *req = ic->sk_requests[i];
4545                        if (req) {
4546                                kfree_sensitive(req->iv);
4547                                skcipher_request_free(req);
4548                        }
4549                }
4550                kvfree(ic->sk_requests);
4551        }
4552        kvfree(ic->journal_tree);
4553        if (ic->sb)
4554                free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4555
4556        if (ic->internal_hash)
4557                crypto_free_shash(ic->internal_hash);
4558        free_alg(&ic->internal_hash_alg);
4559
4560        if (ic->journal_crypt)
4561                crypto_free_skcipher(ic->journal_crypt);
4562        free_alg(&ic->journal_crypt_alg);
4563
4564        if (ic->journal_mac)
4565                crypto_free_shash(ic->journal_mac);
4566        free_alg(&ic->journal_mac_alg);
4567
4568        kfree(ic);
4569}
4570
4571static struct target_type integrity_target = {
4572        .name                   = "integrity",
4573        .version                = {1, 10, 0},
4574        .module                 = THIS_MODULE,
4575        .features               = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4576        .ctr                    = dm_integrity_ctr,
4577        .dtr                    = dm_integrity_dtr,
4578        .map                    = dm_integrity_map,
4579        .postsuspend            = dm_integrity_postsuspend,
4580        .resume                 = dm_integrity_resume,
4581        .status                 = dm_integrity_status,
4582        .iterate_devices        = dm_integrity_iterate_devices,
4583        .io_hints               = dm_integrity_io_hints,
4584};
4585
4586static int __init dm_integrity_init(void)
4587{
4588        int r;
4589
4590        journal_io_cache = kmem_cache_create("integrity_journal_io",
4591                                             sizeof(struct journal_io), 0, 0, NULL);
4592        if (!journal_io_cache) {
4593                DMERR("can't allocate journal io cache");
4594                return -ENOMEM;
4595        }
4596
4597        r = dm_register_target(&integrity_target);
4598
4599        if (r < 0)
4600                DMERR("register failed %d", r);
4601
4602        return r;
4603}
4604
4605static void __exit dm_integrity_exit(void)
4606{
4607        dm_unregister_target(&integrity_target);
4608        kmem_cache_destroy(journal_io_cache);
4609}
4610
4611module_init(dm_integrity_init);
4612module_exit(dm_integrity_exit);
4613
4614MODULE_AUTHOR("Milan Broz");
4615MODULE_AUTHOR("Mikulas Patocka");
4616MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4617MODULE_LICENSE("GPL");
4618