linux/drivers/md/dm-integrity.c
   1/*
   2 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
   3 * Copyright (C) 2016-2017 Milan Broz
   4 * Copyright (C) 2016-2017 Mikulas Patocka
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include <linux/compiler.h>
  10#include <linux/module.h>
  11#include <linux/device-mapper.h>
  12#include <linux/dm-io.h>
  13#include <linux/vmalloc.h>
  14#include <linux/sort.h>
  15#include <linux/rbtree.h>
  16#include <linux/delay.h>
  17#include <linux/random.h>
  18#include <crypto/hash.h>
  19#include <crypto/skcipher.h>
  20#include <linux/async_tx.h>
  21#include <linux/dm-bufio.h>
  22
  23#define DM_MSG_PREFIX "integrity"
  24
  25#define DEFAULT_INTERLEAVE_SECTORS      32768
  26#define DEFAULT_JOURNAL_SIZE_FACTOR     7
  27#define DEFAULT_BUFFER_SECTORS          128
  28#define DEFAULT_JOURNAL_WATERMARK       50
  29#define DEFAULT_SYNC_MSEC               10000
  30#define DEFAULT_MAX_JOURNAL_SECTORS     131072
  31#define MIN_LOG2_INTERLEAVE_SECTORS     3
  32#define MAX_LOG2_INTERLEAVE_SECTORS     31
  33#define METADATA_WORKQUEUE_MAX_ACTIVE   16
  34#define RECALC_SECTORS                  8192
  35#define RECALC_WRITE_SUPER              16
  36
  37/*
  38 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
  39 * so it should not be enabled in the official kernel
  40 */
  41//#define DEBUG_PRINT
  42//#define INTERNAL_VERIFY
  43
  44/*
  45 * On disk structures
  46 */
  47
  48#define SB_MAGIC                        "integrt"
  49#define SB_VERSION_1                    1
  50#define SB_VERSION_2                    2
  51#define SB_SECTORS                      8
  52#define MAX_SECTORS_PER_BLOCK           8
  53
  54struct superblock {
  55        __u8 magic[8];
  56        __u8 version;
  57        __u8 log2_interleave_sectors;
  58        __u16 integrity_tag_size;
  59        __u32 journal_sections;
  60        __u64 provided_data_sectors;    /* userspace uses this value */
  61        __u32 flags;
  62        __u8 log2_sectors_per_block;
  63        __u8 pad[3];
  64        __u64 recalc_sector;
  65};
  66
  67#define SB_FLAG_HAVE_JOURNAL_MAC        0x1
  68#define SB_FLAG_RECALCULATING           0x2
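     /*
      * The superblock occupies the first SB_SECTORS sectors of the metadata
      * device (or of the data device if no separate metadata device is used);
      * see sync_rw_sb().  SB_VERSION_2 is written when a separate metadata
      * device is attached or when SB_FLAG_RECALCULATING is set; see
      * sb_set_version().
      */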
  69
  70#define JOURNAL_ENTRY_ROUNDUP           8
  71
  72typedef __u64 commit_id_t;
  73#define JOURNAL_MAC_PER_SECTOR          8
  74
  75struct journal_entry {
  76        union {
  77                struct {
  78                        __u32 sector_lo;
  79                        __u32 sector_hi;
  80                } s;
  81                __u64 sector;
  82        } u;
  83        commit_id_t last_bytes[0];
  84        /* __u8 tag[0]; */
  85};
  86
  87#define journal_entry_tag(ic, je)               ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
  88
  89#if BITS_PER_LONG == 64
  90#define journal_entry_set_sector(je, x)         do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
  91#define journal_entry_get_sector(je)            le64_to_cpu((je)->u.sector)
  92#elif defined(CONFIG_LBDAF)
  93#define journal_entry_set_sector(je, x)         do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
  94#define journal_entry_get_sector(je)            le64_to_cpu((je)->u.sector)
  95#else
  96#define journal_entry_set_sector(je, x)         do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
  97#define journal_entry_get_sector(je)            le32_to_cpu((je)->u.s.sector_lo)
  98#endif
  99#define journal_entry_is_unused(je)             ((je)->u.s.sector_hi == cpu_to_le32(-1))
 100#define journal_entry_set_unused(je)            do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
 101#define journal_entry_is_inprogress(je)         ((je)->u.s.sector_hi == cpu_to_le32(-2))
 102#define journal_entry_set_inprogress(je)        do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
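     /*
      * sector_hi doubles as a state tag: cpu_to_le32(-1) marks an unused
      * entry and cpu_to_le32(-2) an in-progress one.  journal_entry_set_sector()
      * issues smp_wmb() before publishing (the high half of) the sector with
      * WRITE_ONCE(), so a reader that observes a valid sector_hi also
      * observes the entry contents written before it.
      */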
 103
 104#define JOURNAL_BLOCK_SECTORS           8
 105#define JOURNAL_SECTOR_DATA             ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
 106#define JOURNAL_MAC_SIZE                (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
 107
 108struct journal_sector {
 109        __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
 110        __u8 mac[JOURNAL_MAC_PER_SECTOR];
 111        commit_id_t commit_id;
 112};
 113
 114#define MAX_TAG_SIZE                    (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
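     /*
      * Worked example for the constants above: a 512-byte journal sector
      * holds JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR = 504 - 8 = 496
      * bytes of entries, then 8 MAC bytes and the 8-byte commit_id.  With
      * MAX_SECTORS_PER_BLOCK == 8, offsetof(struct journal_entry,
      * last_bytes[8]) is 8 + 8 * 8 = 72 bytes, so MAX_TAG_SIZE is
      * 496 - 72 = 424 bytes.
      */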
 115
 116#define METADATA_PADDING_SECTORS        8
 117
 118#define N_COMMIT_IDS                    4
 119
 120static unsigned char prev_commit_seq(unsigned char seq)
 121{
 122        return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
 123}
 124
 125static unsigned char next_commit_seq(unsigned char seq)
 126{
 127        return (seq + 1) % N_COMMIT_IDS;
 128}
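     /*
      * The commit sequence number cycles 0 -> 1 -> 2 -> 3 -> 0; each value
      * selects one of the N_COMMIT_IDS random commit ids (see
      * dm_integrity_commit_id()), which lets journal replay decide which of
      * the recent writes of a section is the newest.
      */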
 129
 130/*
 131 * In-memory structures
 132 */
 133
 134struct journal_node {
 135        struct rb_node node;
 136        sector_t sector;
 137};
 138
 139struct alg_spec {
 140        char *alg_string;
 141        char *key_string;
 142        __u8 *key;
 143        unsigned key_size;
 144};
 145
 146struct dm_integrity_c {
 147        struct dm_dev *dev;
 148        struct dm_dev *meta_dev;
 149        unsigned tag_size;
 150        __s8 log2_tag_size;
 151        sector_t start;
 152        mempool_t journal_io_mempool;
 153        struct dm_io_client *io;
 154        struct dm_bufio_client *bufio;
 155        struct workqueue_struct *metadata_wq;
 156        struct superblock *sb;
 157        unsigned journal_pages;
 158        struct page_list *journal;
 159        struct page_list *journal_io;
 160        struct page_list *journal_xor;
 161
 162        struct crypto_skcipher *journal_crypt;
 163        struct scatterlist **journal_scatterlist;
 164        struct scatterlist **journal_io_scatterlist;
 165        struct skcipher_request **sk_requests;
 166
 167        struct crypto_shash *journal_mac;
 168
 169        struct journal_node *journal_tree;
 170        struct rb_root journal_tree_root;
 171
 172        sector_t provided_data_sectors;
 173
 174        unsigned short journal_entry_size;
 175        unsigned char journal_entries_per_sector;
 176        unsigned char journal_section_entries;
 177        unsigned short journal_section_sectors;
 178        unsigned journal_sections;
 179        unsigned journal_entries;
 180        sector_t data_device_sectors;
 181        sector_t meta_device_sectors;
 182        unsigned initial_sectors;
 183        unsigned metadata_run;
 184        __s8 log2_metadata_run;
 185        __u8 log2_buffer_sectors;
 186        __u8 sectors_per_block;
 187
 188        unsigned char mode;
 189        int suspending;
 190
 191        int failed;
 192
 193        struct crypto_shash *internal_hash;
 194
  195        /* the following variables are protected by endio_wait.lock */
 196        struct rb_root in_progress;
 197        struct list_head wait_list;
 198        wait_queue_head_t endio_wait;
 199        struct workqueue_struct *wait_wq;
 200
 201        unsigned char commit_seq;
 202        commit_id_t commit_ids[N_COMMIT_IDS];
 203
 204        unsigned committed_section;
 205        unsigned n_committed_sections;
 206
 207        unsigned uncommitted_section;
 208        unsigned n_uncommitted_sections;
 209
 210        unsigned free_section;
 211        unsigned char free_section_entry;
 212        unsigned free_sectors;
 213
 214        unsigned free_sectors_threshold;
 215
 216        struct workqueue_struct *commit_wq;
 217        struct work_struct commit_work;
 218
 219        struct workqueue_struct *writer_wq;
 220        struct work_struct writer_work;
 221
 222        struct workqueue_struct *recalc_wq;
 223        struct work_struct recalc_work;
 224        u8 *recalc_buffer;
 225        u8 *recalc_tags;
 226
 227        struct bio_list flush_bio_list;
 228
 229        unsigned long autocommit_jiffies;
 230        struct timer_list autocommit_timer;
 231        unsigned autocommit_msec;
 232
 233        wait_queue_head_t copy_to_journal_wait;
 234
 235        struct completion crypto_backoff;
 236
 237        bool journal_uptodate;
 238        bool just_formatted;
 239
 240        struct alg_spec internal_hash_alg;
 241        struct alg_spec journal_crypt_alg;
 242        struct alg_spec journal_mac_alg;
 243
 244        atomic64_t number_of_mismatches;
 245};
 246
 247struct dm_integrity_range {
 248        sector_t logical_sector;
 249        unsigned n_sectors;
 250        bool waiting;
 251        union {
 252                struct rb_node node;
 253                struct {
 254                        struct task_struct *task;
 255                        struct list_head wait_entry;
 256                };
 257        };
 258};
 259
 260struct dm_integrity_io {
 261        struct work_struct work;
 262
 263        struct dm_integrity_c *ic;
 264        bool write;
 265        bool fua;
 266
 267        struct dm_integrity_range range;
 268
 269        sector_t metadata_block;
 270        unsigned metadata_offset;
 271
 272        atomic_t in_flight;
 273        blk_status_t bi_status;
 274
 275        struct completion *completion;
 276
 277        struct gendisk *orig_bi_disk;
 278        u8 orig_bi_partno;
 279        bio_end_io_t *orig_bi_end_io;
 280        struct bio_integrity_payload *orig_bi_integrity;
 281        struct bvec_iter orig_bi_iter;
 282};
 283
 284struct journal_completion {
 285        struct dm_integrity_c *ic;
 286        atomic_t in_flight;
 287        struct completion comp;
 288};
 289
 290struct journal_io {
 291        struct dm_integrity_range range;
 292        struct journal_completion *comp;
 293};
 294
 295static struct kmem_cache *journal_io_cache;
 296
 297#define JOURNAL_IO_MEMPOOL      32
 298
 299#ifdef DEBUG_PRINT
 300#define DEBUG_print(x, ...)     printk(KERN_DEBUG x, ##__VA_ARGS__)
 301static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
 302{
 303        va_list args;
 304        va_start(args, msg);
 305        vprintk(msg, args);
 306        va_end(args);
 307        if (len)
 308                pr_cont(":");
 309        while (len) {
 310                pr_cont(" %02x", *bytes);
 311                bytes++;
 312                len--;
 313        }
 314        pr_cont("\n");
 315}
 316#define DEBUG_bytes(bytes, len, msg, ...)       __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
 317#else
 318#define DEBUG_print(x, ...)                     do { } while (0)
 319#define DEBUG_bytes(bytes, len, msg, ...)       do { } while (0)
 320#endif
 321
 322/*
  323 * DM Integrity profile, protection is performed by the layer above (dm-crypt)
 324 */
 325static const struct blk_integrity_profile dm_integrity_profile = {
 326        .name                   = "DM-DIF-EXT-TAG",
 327        .generate_fn            = NULL,
 328        .verify_fn              = NULL,
 329};
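     /*
      * generate_fn and verify_fn are NULL, so the block layer only allocates
      * and passes the integrity buffer through; the tags are produced and
      * checked either by dm-integrity itself (internal hash) or by an upper
      * layer such as dm-crypt.
      */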
 330
 331static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
 332static void integrity_bio_wait(struct work_struct *w);
 333static void dm_integrity_dtr(struct dm_target *ti);
 334
 335static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
 336{
 337        if (err == -EILSEQ)
 338                atomic64_inc(&ic->number_of_mismatches);
 339        if (!cmpxchg(&ic->failed, 0, err))
 340                DMERR("Error on %s: %d", msg, err);
 341}
 342
 343static int dm_integrity_failed(struct dm_integrity_c *ic)
 344{
 345        return READ_ONCE(ic->failed);
 346}
 347
 348static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
 349                                          unsigned j, unsigned char seq)
 350{
 351        /*
  352         * Xor the commit id with the section and sector numbers, so that
  353         * if a piece of journal is written at the wrong place, it is detected.
 354         */
 355        return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
 356}
 357
 358static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
 359                                sector_t *area, sector_t *offset)
 360{
 361        if (!ic->meta_dev) {
 362                __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
 363                *area = data_sector >> log2_interleave_sectors;
 364                *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
 365        } else {
 366                *area = 0;
 367                *offset = data_sector;
 368        }
 369}
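     /*
      * Example (interleaved mode, log2_interleave_sectors == 15, i.e. the
      * default 32768-sector areas): data_sector 100000 yields area 3 and
      * offset 1696, because 3 * 32768 = 98304 and 100000 - 98304 = 1696.
      */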
 370
 371#define sector_to_block(ic, n)                                          \
 372do {                                                                    \
 373        BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));          \
 374        (n) >>= (ic)->sb->log2_sectors_per_block;                       \
 375} while (0)
 376
 377static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
 378                                            sector_t offset, unsigned *metadata_offset)
 379{
 380        __u64 ms;
 381        unsigned mo;
 382
 383        ms = area << ic->sb->log2_interleave_sectors;
 384        if (likely(ic->log2_metadata_run >= 0))
 385                ms += area << ic->log2_metadata_run;
 386        else
 387                ms += area * ic->metadata_run;
 388        ms >>= ic->log2_buffer_sectors;
 389
 390        sector_to_block(ic, offset);
 391
 392        if (likely(ic->log2_tag_size >= 0)) {
 393                ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
 394                mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
 395        } else {
 396                ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
 397                mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
 398        }
 399        *metadata_offset = mo;
 400        return ms;
 401}
 402
 403static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
 404{
 405        sector_t result;
 406
 407        if (ic->meta_dev)
 408                return offset;
 409
 410        result = area << ic->sb->log2_interleave_sectors;
 411        if (likely(ic->log2_metadata_run >= 0))
 412                result += (area + 1) << ic->log2_metadata_run;
 413        else
 414                result += (area + 1) * ic->metadata_run;
 415
 416        result += (sector_t)ic->initial_sectors + offset;
 417        result += ic->start;
 418
 419        return result;
 420}
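     /*
      * Resulting layout in interleaved mode (no separate metadata device):
      *
      *   | sb + journal (initial_sectors) | M0 | D0 | M1 | D1 | ...
      *
      * where each Mi is a metadata run of metadata_run sectors and each Di
      * is an interleave area of (1 << log2_interleave_sectors) data sectors;
      * (area + 1) metadata runs are added above because an area's metadata
      * precedes its data.
      */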
 421
 422static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
 423{
 424        if (unlikely(*sec_ptr >= ic->journal_sections))
 425                *sec_ptr -= ic->journal_sections;
 426}
 427
 428static void sb_set_version(struct dm_integrity_c *ic)
 429{
 430        if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
 431                ic->sb->version = SB_VERSION_2;
 432        else
 433                ic->sb->version = SB_VERSION_1;
 434}
 435
 436static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
 437{
 438        struct dm_io_request io_req;
 439        struct dm_io_region io_loc;
 440
 441        io_req.bi_op = op;
 442        io_req.bi_op_flags = op_flags;
 443        io_req.mem.type = DM_IO_KMEM;
 444        io_req.mem.ptr.addr = ic->sb;
 445        io_req.notify.fn = NULL;
 446        io_req.client = ic->io;
 447        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
 448        io_loc.sector = ic->start;
 449        io_loc.count = SB_SECTORS;
 450
 451        return dm_io(&io_req, 1, &io_loc, NULL);
 452}
 453
 454static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
 455                                 bool e, const char *function)
 456{
 457#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
 458        unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
 459
 460        if (unlikely(section >= ic->journal_sections) ||
 461            unlikely(offset >= limit)) {
 462                printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
 463                        function, section, offset, ic->journal_sections, limit);
 464                BUG();
 465        }
 466#endif
 467}
 468
 469static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
 470                               unsigned *pl_index, unsigned *pl_offset)
 471{
 472        unsigned sector;
 473
 474        access_journal_check(ic, section, offset, false, "page_list_location");
 475
 476        sector = section * ic->journal_section_sectors + offset;
 477
 478        *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
 479        *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
 480}
 481
 482static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
 483                                               unsigned section, unsigned offset, unsigned *n_sectors)
 484{
 485        unsigned pl_index, pl_offset;
 486        char *va;
 487
 488        page_list_location(ic, section, offset, &pl_index, &pl_offset);
 489
 490        if (n_sectors)
 491                *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;
 492
 493        va = lowmem_page_address(pl[pl_index].page);
 494
 495        return (struct journal_sector *)(va + pl_offset);
 496}
 497
 498static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
 499{
 500        return access_page_list(ic, ic->journal, section, offset, NULL);
 501}
 502
 503static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
 504{
 505        unsigned rel_sector, offset;
 506        struct journal_sector *js;
 507
 508        access_journal_check(ic, section, n, true, "access_journal_entry");
 509
 510        rel_sector = n % JOURNAL_BLOCK_SECTORS;
 511        offset = n / JOURNAL_BLOCK_SECTORS;
 512
 513        js = access_journal(ic, section, rel_sector);
 514        return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
 515}
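     /*
      * Journal entries are striped across the JOURNAL_BLOCK_SECTORS (8)
      * metadata sectors at the start of a section: entry n lives in sector
      * n % 8 at slot n / 8, each slot being journal_entry_size bytes.
      */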
 516
 517static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
 518{
 519        n <<= ic->sb->log2_sectors_per_block;
 520
 521        n += JOURNAL_BLOCK_SECTORS;
 522
 523        access_journal_check(ic, section, n, false, "access_journal_data");
 524
 525        return access_journal(ic, section, n);
 526}
 527
 528static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
 529{
 530        SHASH_DESC_ON_STACK(desc, ic->journal_mac);
 531        int r;
 532        unsigned j, size;
 533
 534        desc->tfm = ic->journal_mac;
 535        desc->flags = 0;
 536
 537        r = crypto_shash_init(desc);
 538        if (unlikely(r)) {
 539                dm_integrity_io_error(ic, "crypto_shash_init", r);
 540                goto err;
 541        }
 542
 543        for (j = 0; j < ic->journal_section_entries; j++) {
 544                struct journal_entry *je = access_journal_entry(ic, section, j);
 545                r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
 546                if (unlikely(r)) {
 547                        dm_integrity_io_error(ic, "crypto_shash_update", r);
 548                        goto err;
 549                }
 550        }
 551
 552        size = crypto_shash_digestsize(ic->journal_mac);
 553
 554        if (likely(size <= JOURNAL_MAC_SIZE)) {
 555                r = crypto_shash_final(desc, result);
 556                if (unlikely(r)) {
 557                        dm_integrity_io_error(ic, "crypto_shash_final", r);
 558                        goto err;
 559                }
 560                memset(result + size, 0, JOURNAL_MAC_SIZE - size);
 561        } else {
 562                __u8 digest[HASH_MAX_DIGESTSIZE];
 563
 564                if (WARN_ON(size > sizeof(digest))) {
 565                        dm_integrity_io_error(ic, "digest_size", -EINVAL);
 566                        goto err;
 567                }
 568                r = crypto_shash_final(desc, digest);
 569                if (unlikely(r)) {
 570                        dm_integrity_io_error(ic, "crypto_shash_final", r);
 571                        goto err;
 572                }
 573                memcpy(result, digest, JOURNAL_MAC_SIZE);
 574        }
 575
 576        return;
 577err:
 578        memset(result, 0, JOURNAL_MAC_SIZE);
 579}
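     /*
      * The section MAC covers only the sector numbers of the section's
      * entries, not the data blocks or tags.  The digest is zero-padded or
      * truncated to JOURNAL_MAC_SIZE (64) bytes, which rw_section_mac()
      * spreads over the section's first JOURNAL_BLOCK_SECTORS sectors
      * 8 bytes at a time.
      */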
 580
 581static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
 582{
 583        __u8 result[JOURNAL_MAC_SIZE];
 584        unsigned j;
 585
 586        if (!ic->journal_mac)
 587                return;
 588
 589        section_mac(ic, section, result);
 590
 591        for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
 592                struct journal_sector *js = access_journal(ic, section, j);
 593
 594                if (likely(wr))
 595                        memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
 596                else {
 597                        if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
 598                                dm_integrity_io_error(ic, "journal mac", -EILSEQ);
 599                }
 600        }
 601}
 602
 603static void complete_journal_op(void *context)
 604{
 605        struct journal_completion *comp = context;
 606        BUG_ON(!atomic_read(&comp->in_flight));
 607        if (likely(atomic_dec_and_test(&comp->in_flight)))
 608                complete(&comp->comp);
 609}
 610
 611static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
 612                        unsigned n_sections, struct journal_completion *comp)
 613{
 614        struct async_submit_ctl submit;
 615        size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
 616        unsigned pl_index, pl_offset, section_index;
 617        struct page_list *source_pl, *target_pl;
 618
 619        if (likely(encrypt)) {
 620                source_pl = ic->journal;
 621                target_pl = ic->journal_io;
 622        } else {
 623                source_pl = ic->journal_io;
 624                target_pl = ic->journal;
 625        }
 626
 627        page_list_location(ic, section, 0, &pl_index, &pl_offset);
 628
 629        atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);
 630
 631        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);
 632
 633        section_index = pl_index;
 634
 635        do {
 636                size_t this_step;
 637                struct page *src_pages[2];
 638                struct page *dst_page;
 639
 640                while (unlikely(pl_index == section_index)) {
 641                        unsigned dummy;
 642                        if (likely(encrypt))
 643                                rw_section_mac(ic, section, true);
 644                        section++;
 645                        n_sections--;
 646                        if (!n_sections)
 647                                break;
 648                        page_list_location(ic, section, 0, &section_index, &dummy);
 649                }
 650
 651                this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
 652                dst_page = target_pl[pl_index].page;
 653                src_pages[0] = source_pl[pl_index].page;
 654                src_pages[1] = ic->journal_xor[pl_index].page;
 655
 656                async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);
 657
 658                pl_index++;
 659                pl_offset = 0;
 660                n_bytes -= this_step;
 661        } while (n_bytes);
 662
 663        BUG_ON(n_sections);
 664
 665        async_tx_issue_pending_all();
 666}
 667
 668static void complete_journal_encrypt(struct crypto_async_request *req, int err)
 669{
 670        struct journal_completion *comp = req->data;
 671        if (unlikely(err)) {
 672                if (likely(err == -EINPROGRESS)) {
 673                        complete(&comp->ic->crypto_backoff);
 674                        return;
 675                }
 676                dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
 677        }
 678        complete_journal_op(comp);
 679}
 680
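     /*
      * Submit one journal skcipher request.  Returns true when the request
      * stays in flight (the caller then bumps comp->in_flight and
      * complete_journal_encrypt() finishes it) and false when it completed
      * synchronously or failed.  A backlogged transform returns -EBUSY; the
      * request is still queued in that case, so we wait on crypto_backoff,
      * which the callback signals with the -EINPROGRESS notification once
      * the backlog drains.
      */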
 681static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
 682{
 683        int r;
 684        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 685                                      complete_journal_encrypt, comp);
 686        if (likely(encrypt))
 687                r = crypto_skcipher_encrypt(req);
 688        else
 689                r = crypto_skcipher_decrypt(req);
 690        if (likely(!r))
 691                return false;
 692        if (likely(r == -EINPROGRESS))
 693                return true;
 694        if (likely(r == -EBUSY)) {
 695                wait_for_completion(&comp->ic->crypto_backoff);
 696                reinit_completion(&comp->ic->crypto_backoff);
 697                return true;
 698        }
 699        dm_integrity_io_error(comp->ic, "encrypt", r);
 700        return false;
 701}
 702
 703static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
 704                          unsigned n_sections, struct journal_completion *comp)
 705{
 706        struct scatterlist **source_sg;
 707        struct scatterlist **target_sg;
 708
 709        atomic_add(2, &comp->in_flight);
 710
 711        if (likely(encrypt)) {
 712                source_sg = ic->journal_scatterlist;
 713                target_sg = ic->journal_io_scatterlist;
 714        } else {
 715                source_sg = ic->journal_io_scatterlist;
 716                target_sg = ic->journal_scatterlist;
 717        }
 718
 719        do {
 720                struct skcipher_request *req;
 721                unsigned ivsize;
 722                char *iv;
 723
 724                if (likely(encrypt))
 725                        rw_section_mac(ic, section, true);
 726
 727                req = ic->sk_requests[section];
 728                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
 729                iv = req->iv;
 730
 731                memcpy(iv, iv + ivsize, ivsize);
 732
 733                req->src = source_sg[section];
 734                req->dst = target_sg[section];
 735
 736                if (unlikely(do_crypt(encrypt, req, comp)))
 737                        atomic_inc(&comp->in_flight);
 738
 739                section++;
 740                n_sections--;
 741        } while (n_sections);
 742
 743        atomic_dec(&comp->in_flight);
 744        complete_journal_op(comp);
 745}
 746
 747static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
 748                            unsigned n_sections, struct journal_completion *comp)
 749{
 750        if (ic->journal_xor)
 751                return xor_journal(ic, encrypt, section, n_sections, comp);
 752        else
 753                return crypt_journal(ic, encrypt, section, n_sections, comp);
 754}
 755
 756static void complete_journal_io(unsigned long error, void *context)
 757{
 758        struct journal_completion *comp = context;
 759        if (unlikely(error != 0))
 760                dm_integrity_io_error(comp->ic, "writing journal", -EIO);
 761        complete_journal_op(comp);
 762}
 763
 764static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
 765                       unsigned n_sections, struct journal_completion *comp)
 766{
 767        struct dm_io_request io_req;
 768        struct dm_io_region io_loc;
 769        unsigned sector, n_sectors, pl_index, pl_offset;
 770        int r;
 771
 772        if (unlikely(dm_integrity_failed(ic))) {
 773                if (comp)
 774                        complete_journal_io(-1UL, comp);
 775                return;
 776        }
 777
 778        sector = section * ic->journal_section_sectors;
 779        n_sectors = n_sections * ic->journal_section_sectors;
 780
 781        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
 782        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
 783
 784        io_req.bi_op = op;
 785        io_req.bi_op_flags = op_flags;
 786        io_req.mem.type = DM_IO_PAGE_LIST;
 787        if (ic->journal_io)
 788                io_req.mem.ptr.pl = &ic->journal_io[pl_index];
 789        else
 790                io_req.mem.ptr.pl = &ic->journal[pl_index];
 791        io_req.mem.offset = pl_offset;
 792        if (likely(comp != NULL)) {
 793                io_req.notify.fn = complete_journal_io;
 794                io_req.notify.context = comp;
 795        } else {
 796                io_req.notify.fn = NULL;
 797        }
 798        io_req.client = ic->io;
 799        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
 800        io_loc.sector = ic->start + SB_SECTORS + sector;
 801        io_loc.count = n_sectors;
 802
 803        r = dm_io(&io_req, 1, &io_loc, NULL);
 804        if (unlikely(r)) {
 805                dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
 806                if (comp) {
 807                        WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
 808                        complete_journal_io(-1UL, comp);
 809                }
 810        }
 811}
 812
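     /*
      * Write the given range of journal sections to the on-disk journal.
      * The simple case is a contiguous range; a range that wraps past the
      * last section is split into [commit_start, journal_sections) and
      * [0, remainder), and with an encrypted journal the encryption of the
      * second chunk is overlapped with the first chunk's I/O when possible.
      */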
 813static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
 814{
 815        struct journal_completion io_comp;
 816        struct journal_completion crypt_comp_1;
 817        struct journal_completion crypt_comp_2;
 818        unsigned i;
 819
 820        io_comp.ic = ic;
 821        init_completion(&io_comp.comp);
 822
 823        if (commit_start + commit_sections <= ic->journal_sections) {
 824                io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
 825                if (ic->journal_io) {
 826                        crypt_comp_1.ic = ic;
 827                        init_completion(&crypt_comp_1.comp);
 828                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
 829                        encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
 830                        wait_for_completion_io(&crypt_comp_1.comp);
 831                } else {
 832                        for (i = 0; i < commit_sections; i++)
 833                                rw_section_mac(ic, commit_start + i, true);
 834                }
 835                rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
 836                           commit_sections, &io_comp);
 837        } else {
 838                unsigned to_end;
 839                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
 840                to_end = ic->journal_sections - commit_start;
 841                if (ic->journal_io) {
 842                        crypt_comp_1.ic = ic;
 843                        init_completion(&crypt_comp_1.comp);
 844                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
 845                        encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
 846                        if (try_wait_for_completion(&crypt_comp_1.comp)) {
 847                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
 848                                reinit_completion(&crypt_comp_1.comp);
 849                                crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
 850                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
 851                                wait_for_completion_io(&crypt_comp_1.comp);
 852                        } else {
 853                                crypt_comp_2.ic = ic;
 854                                init_completion(&crypt_comp_2.comp);
 855                                crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
 856                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
 857                                wait_for_completion_io(&crypt_comp_1.comp);
 858                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
 859                                wait_for_completion_io(&crypt_comp_2.comp);
 860                        }
 861                } else {
 862                        for (i = 0; i < to_end; i++)
 863                                rw_section_mac(ic, commit_start + i, true);
 864                        rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
 865                        for (i = 0; i < commit_sections - to_end; i++)
 866                                rw_section_mac(ic, i, true);
 867                }
 868                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
 869        }
 870
 871        wait_for_completion_io(&io_comp.comp);
 872}
 873
 874static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
 875                              unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
 876{
 877        struct dm_io_request io_req;
 878        struct dm_io_region io_loc;
 879        int r;
 880        unsigned sector, pl_index, pl_offset;
 881
 882        BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
 883
 884        if (unlikely(dm_integrity_failed(ic))) {
 885                fn(-1UL, data);
 886                return;
 887        }
 888
 889        sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;
 890
 891        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
 892        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
 893
 894        io_req.bi_op = REQ_OP_WRITE;
 895        io_req.bi_op_flags = 0;
 896        io_req.mem.type = DM_IO_PAGE_LIST;
 897        io_req.mem.ptr.pl = &ic->journal[pl_index];
 898        io_req.mem.offset = pl_offset;
 899        io_req.notify.fn = fn;
 900        io_req.notify.context = data;
 901        io_req.client = ic->io;
 902        io_loc.bdev = ic->dev->bdev;
 903        io_loc.sector = target;
 904        io_loc.count = n_sectors;
 905
 906        r = dm_io(&io_req, 1, &io_loc, NULL);
 907        if (unlikely(r)) {
 908                WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
 909                fn(-1UL, data);
 910        }
 911}
 912
 913static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
 914{
 915        return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
  916               range1->logical_sector + range1->n_sectors > range2->logical_sector;
 917}
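     /*
      * Standard half-open interval test: [a1, a1 + n1) and [a2, a2 + n2)
      * overlap iff a1 < a2 + n2 && a1 + n1 > a2.
      */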
 918
 919static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
 920{
 921        struct rb_node **n = &ic->in_progress.rb_node;
 922        struct rb_node *parent;
 923
 924        BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
 925
 926        if (likely(check_waiting)) {
 927                struct dm_integrity_range *range;
 928                list_for_each_entry(range, &ic->wait_list, wait_entry) {
 929                        if (unlikely(ranges_overlap(range, new_range)))
 930                                return false;
 931                }
 932        }
 933
 934        parent = NULL;
 935
 936        while (*n) {
 937                struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
 938
 939                parent = *n;
 940                if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
 941                        n = &range->node.rb_left;
 942                } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
 943                        n = &range->node.rb_right;
 944                } else {
 945                        return false;
 946                }
 947        }
 948
 949        rb_link_node(&new_range->node, parent, n);
 950        rb_insert_color(&new_range->node, &ic->in_progress);
 951
 952        return true;
 953}
 954
 955static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
 956{
 957        rb_erase(&range->node, &ic->in_progress);
 958        while (unlikely(!list_empty(&ic->wait_list))) {
 959                struct dm_integrity_range *last_range =
 960                        list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
 961                struct task_struct *last_range_task;
 962                if (!ranges_overlap(range, last_range))
 963                        break;
 964                last_range_task = last_range->task;
 965                list_del(&last_range->wait_entry);
 966                if (!add_new_range(ic, last_range, false)) {
 967                        last_range->task = last_range_task;
 968                        list_add(&last_range->wait_entry, &ic->wait_list);
 969                        break;
 970                }
 971                last_range->waiting = false;
 972                wake_up_process(last_range_task);
 973        }
 974}
 975
 976static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
 977{
 978        unsigned long flags;
 979
 980        spin_lock_irqsave(&ic->endio_wait.lock, flags);
 981        remove_range_unlocked(ic, range);
 982        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
 983}
 984
 985static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
 986{
 987        new_range->waiting = true;
 988        list_add_tail(&new_range->wait_entry, &ic->wait_list);
 989        new_range->task = current;
 990        do {
 991                __set_current_state(TASK_UNINTERRUPTIBLE);
 992                spin_unlock_irq(&ic->endio_wait.lock);
 993                io_schedule();
 994                spin_lock_irq(&ic->endio_wait.lock);
 995        } while (unlikely(new_range->waiting));
 996}
 997
 998static void init_journal_node(struct journal_node *node)
 999{
1000        RB_CLEAR_NODE(&node->node);
1001        node->sector = (sector_t)-1;
1002}
1003
1004static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
1005{
1006        struct rb_node **link;
1007        struct rb_node *parent;
1008
1009        node->sector = sector;
1010        BUG_ON(!RB_EMPTY_NODE(&node->node));
1011
1012        link = &ic->journal_tree_root.rb_node;
1013        parent = NULL;
1014
1015        while (*link) {
1016                struct journal_node *j;
1017                parent = *link;
1018                j = container_of(parent, struct journal_node, node);
1019                if (sector < j->sector)
1020                        link = &j->node.rb_left;
1021                else
1022                        link = &j->node.rb_right;
1023        }
1024
1025        rb_link_node(&node->node, parent, link);
1026        rb_insert_color(&node->node, &ic->journal_tree_root);
1027}
1028
1029static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
1030{
1031        BUG_ON(RB_EMPTY_NODE(&node->node));
1032        rb_erase(&node->node, &ic->journal_tree_root);
1033        init_journal_node(node);
1034}
1035
1036#define NOT_FOUND       (-1U)
1037
1038static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
1039{
1040        struct rb_node *n = ic->journal_tree_root.rb_node;
1041        unsigned found = NOT_FOUND;
1042        *next_sector = (sector_t)-1;
1043        while (n) {
1044                struct journal_node *j = container_of(n, struct journal_node, node);
1045                if (sector == j->sector) {
1046                        found = j - ic->journal_tree;
1047                }
1048                if (sector < j->sector) {
1049                        *next_sector = j->sector;
1050                        n = j->node.rb_left;
1051                } else {
1052                        n = j->node.rb_right;
1053                }
1054        }
1055
1056        return found;
1057}
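     /*
      * The journal tree is a multimap: several journal entries may refer to
      * the same logical sector.  add_journal_node() sends equal keys to the
      * right and the loop above keeps descending right on a match, so the
      * returned index belongs to the most recently added node for the
      * sector.
      */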
1058
1059static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
1060{
1061        struct journal_node *node, *next_node;
1062        struct rb_node *next;
1063
1064        if (unlikely(pos >= ic->journal_entries))
1065                return false;
1066        node = &ic->journal_tree[pos];
1067        if (unlikely(RB_EMPTY_NODE(&node->node)))
1068                return false;
1069        if (unlikely(node->sector != sector))
1070                return false;
1071
1072        next = rb_next(&node->node);
1073        if (unlikely(!next))
1074                return true;
1075
1076        next_node = container_of(next, struct journal_node, node);
1077        return next_node->sector != sector;
1078}
1079
1080static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
1081{
1082        struct rb_node *next;
1083        struct journal_node *next_node;
1084        unsigned next_section;
1085
1086        BUG_ON(RB_EMPTY_NODE(&node->node));
1087
1088        next = rb_next(&node->node);
1089        if (unlikely(!next))
1090                return false;
1091
1092        next_node = container_of(next, struct journal_node, node);
1093
1094        if (next_node->sector != node->sector)
1095                return false;
1096
1097        next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
1098        if (next_section >= ic->committed_section &&
1099            next_section < ic->committed_section + ic->n_committed_sections)
1100                return true;
1101        if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
1102                return true;
1103
1104        return false;
1105}
1106
1107#define TAG_READ        0
1108#define TAG_WRITE       1
1109#define TAG_CMP         2
1110
1111static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
1112                               unsigned *metadata_offset, unsigned total_size, int op)
1113{
1114        do {
1115                unsigned char *data, *dp;
1116                struct dm_buffer *b;
1117                unsigned to_copy;
1118                int r;
1119
1120                r = dm_integrity_failed(ic);
1121                if (unlikely(r))
1122                        return r;
1123
1124                data = dm_bufio_read(ic->bufio, *metadata_block, &b);
1125                if (unlikely(IS_ERR(data)))
1126                        return PTR_ERR(data);
1127
1128                to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
1129                dp = data + *metadata_offset;
1130                if (op == TAG_READ) {
1131                        memcpy(tag, dp, to_copy);
1132                } else if (op == TAG_WRITE) {
1133                        memcpy(dp, tag, to_copy);
1134                        dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
1135                } else  {
1136                        /* e.g.: op == TAG_CMP */
1137                        if (unlikely(memcmp(dp, tag, to_copy))) {
1138                                unsigned i;
1139
1140                                for (i = 0; i < to_copy; i++) {
1141                                        if (dp[i] != tag[i])
1142                                                break;
1143                                        total_size--;
1144                                }
1145                                dm_bufio_release(b);
1146                                return total_size;
1147                        }
1148                }
1149                dm_bufio_release(b);
1150
1151                tag += to_copy;
1152                *metadata_offset += to_copy;
1153                if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
1154                        (*metadata_block)++;
1155                        *metadata_offset = 0;
1156                }
1157                total_size -= to_copy;
1158        } while (unlikely(total_size));
1159
1160        return 0;
1161}
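     /*
      * For TAG_CMP, a nonzero return value is the number of tag bytes from
      * the first mismatching byte to the end of the requested range (the
      * matching prefix of the failing buffer is subtracted from total_size);
      * the caller uses it to step back to the block whose checksum failed.
      */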
1162
1163static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
1164{
1165        int r;
1166        r = dm_bufio_write_dirty_buffers(ic->bufio);
1167        if (unlikely(r))
1168                dm_integrity_io_error(ic, "writing tags", r);
1169}
1170
1171static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1172{
1173        DECLARE_WAITQUEUE(wait, current);
1174        __add_wait_queue(&ic->endio_wait, &wait);
1175        __set_current_state(TASK_UNINTERRUPTIBLE);
1176        spin_unlock_irq(&ic->endio_wait.lock);
1177        io_schedule();
1178        spin_lock_irq(&ic->endio_wait.lock);
1179        __remove_wait_queue(&ic->endio_wait, &wait);
1180}
1181
1182static void autocommit_fn(struct timer_list *t)
1183{
1184        struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1185
1186        if (likely(!dm_integrity_failed(ic)))
1187                queue_work(ic->commit_wq, &ic->commit_work);
1188}
1189
1190static void schedule_autocommit(struct dm_integrity_c *ic)
1191{
1192        if (!timer_pending(&ic->autocommit_timer))
1193                mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1194}
1195
1196static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1197{
1198        struct bio *bio;
1199        unsigned long flags;
1200
1201        spin_lock_irqsave(&ic->endio_wait.lock, flags);
1202        bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1203        bio_list_add(&ic->flush_bio_list, bio);
1204        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1205
1206        queue_work(ic->commit_wq, &ic->commit_work);
1207}
1208
1209static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1210{
1211        int r = dm_integrity_failed(ic);
1212        if (unlikely(r) && !bio->bi_status)
1213                bio->bi_status = errno_to_blk_status(r);
1214        bio_endio(bio);
1215}
1216
1217static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1218{
1219        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1220
1221        if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1222                submit_flush_bio(ic, dio);
1223        else
1224                do_endio(ic, bio);
1225}
1226
1227static void dec_in_flight(struct dm_integrity_io *dio)
1228{
1229        if (atomic_dec_and_test(&dio->in_flight)) {
1230                struct dm_integrity_c *ic = dio->ic;
1231                struct bio *bio;
1232
1233                remove_range(ic, &dio->range);
1234
1235                if (unlikely(dio->write))
1236                        schedule_autocommit(ic);
1237
1238                bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1239
1240                if (unlikely(dio->bi_status) && !bio->bi_status)
1241                        bio->bi_status = dio->bi_status;
1242                if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1243                        dio->range.logical_sector += dio->range.n_sectors;
1244                        bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1245                        INIT_WORK(&dio->work, integrity_bio_wait);
1246                        queue_work(ic->wait_wq, &dio->work);
1247                        return;
1248                }
1249                do_endio_flush(ic, dio);
1250        }
1251}
1252
1253static void integrity_end_io(struct bio *bio)
1254{
1255        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1256
1257        bio->bi_iter = dio->orig_bi_iter;
1258        bio->bi_disk = dio->orig_bi_disk;
1259        bio->bi_partno = dio->orig_bi_partno;
1260        if (dio->orig_bi_integrity) {
1261                bio->bi_integrity = dio->orig_bi_integrity;
1262                bio->bi_opf |= REQ_INTEGRITY;
1263        }
1264        bio->bi_end_io = dio->orig_bi_end_io;
1265
1266        if (dio->completion)
1267                complete(dio->completion);
1268
1269        dec_in_flight(dio);
1270}
1271
1272static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1273                                      const char *data, char *result)
1274{
1275        __u64 sector_le = cpu_to_le64(sector);
1276        SHASH_DESC_ON_STACK(req, ic->internal_hash);
1277        int r;
1278        unsigned digest_size;
1279
1280        req->tfm = ic->internal_hash;
1281        req->flags = 0;
1282
1283        r = crypto_shash_init(req);
1284        if (unlikely(r < 0)) {
1285                dm_integrity_io_error(ic, "crypto_shash_init", r);
1286                goto failed;
1287        }
1288
1289        r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1290        if (unlikely(r < 0)) {
1291                dm_integrity_io_error(ic, "crypto_shash_update", r);
1292                goto failed;
1293        }
1294
1295        r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1296        if (unlikely(r < 0)) {
1297                dm_integrity_io_error(ic, "crypto_shash_update", r);
1298                goto failed;
1299        }
1300
1301        r = crypto_shash_final(req, result);
1302        if (unlikely(r < 0)) {
1303                dm_integrity_io_error(ic, "crypto_shash_final", r);
1304                goto failed;
1305        }
1306
1307        digest_size = crypto_shash_digestsize(ic->internal_hash);
1308        if (unlikely(digest_size < ic->tag_size))
1309                memset(result + digest_size, 0, ic->tag_size - digest_size);
1310
1311        return;
1312
1313failed:
1314        /* this shouldn't happen anyway, the hash functions have no reason to fail */
1315        get_random_bytes(result, ic->tag_size);
1316}
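     /*
      * The per-block tag is shash(le64 sector number || block data),
      * zero-padded when the digest is shorter than tag_size.  On the
      * (unexpected) failure path the tag is filled with random bytes, so a
      * subsequent verification is overwhelmingly likely to fail rather than
      * silently pass.
      */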
1317
1318static void integrity_metadata(struct work_struct *w)
1319{
1320        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1321        struct dm_integrity_c *ic = dio->ic;
1322
1323        int r;
1324
1325        if (ic->internal_hash) {
1326                struct bvec_iter iter;
1327                struct bio_vec bv;
1328                unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1329                struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1330                char *checksums;
1331                unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1332                char checksums_onstack[HASH_MAX_DIGESTSIZE];
1333                unsigned sectors_to_process = dio->range.n_sectors;
1334                sector_t sector = dio->range.logical_sector;
1335
1336                if (unlikely(ic->mode == 'R'))
1337                        goto skip_io;
1338
1339                checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1340                                    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1341                if (!checksums) {
1342                        checksums = checksums_onstack;
1343                        if (WARN_ON(extra_space &&
1344                                    digest_size > sizeof(checksums_onstack))) {
1345                                r = -EINVAL;
1346                                goto error;
1347                        }
1348                }
1349
1350                __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
1351                        unsigned pos;
1352                        char *mem, *checksums_ptr;
1353
1354again:
1355                        mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
1356                        pos = 0;
1357                        checksums_ptr = checksums;
1358                        do {
1359                                integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1360                                checksums_ptr += ic->tag_size;
1361                                sectors_to_process -= ic->sectors_per_block;
1362                                pos += ic->sectors_per_block << SECTOR_SHIFT;
1363                                sector += ic->sectors_per_block;
1364                        } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1365                        kunmap_atomic(mem);
1366
1367                        r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1368                                                checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
1369                        if (unlikely(r)) {
1370                                if (r > 0) {
1371                                        DMERR("Checksum failed at sector 0x%llx",
1372                                              (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1373                                        r = -EILSEQ;
1374                                        atomic64_inc(&ic->number_of_mismatches);
1375                                }
1376                                if (likely(checksums != checksums_onstack))
1377                                        kfree(checksums);
1378                                goto error;
1379                        }
1380
1381                        if (!sectors_to_process)
1382                                break;
1383
1384                        if (unlikely(pos < bv.bv_len)) {
1385                                bv.bv_offset += pos;
1386                                bv.bv_len -= pos;
1387                                goto again;
1388                        }
1389                }
1390
1391                if (likely(checksums != checksums_onstack))
1392                        kfree(checksums);
1393        } else {
1394                struct bio_integrity_payload *bip = dio->orig_bi_integrity;
1395
1396                if (bip) {
1397                        struct bio_vec biv;
1398                        struct bvec_iter iter;
1399                        unsigned data_to_process = dio->range.n_sectors;
1400                        sector_to_block(ic, data_to_process);
1401                        data_to_process *= ic->tag_size;
1402
1403                        bip_for_each_vec(biv, bip, iter) {
1404                                unsigned char *tag;
1405                                unsigned this_len;
1406
1407                                BUG_ON(PageHighMem(biv.bv_page));
1408                                tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1409                                this_len = min(biv.bv_len, data_to_process);
1410                                r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1411                                                        this_len, !dio->write ? TAG_READ : TAG_WRITE);
1412                                if (unlikely(r))
1413                                        goto error;
1414                                data_to_process -= this_len;
1415                                if (!data_to_process)
1416                                        break;
1417                        }
1418                }
1419        }
1420skip_io:
1421        dec_in_flight(dio);
1422        return;
1423error:
1424        dio->bi_status = errno_to_blk_status(r);
1425        dec_in_flight(dio);
1426}
1427
1428static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1429{
1430        struct dm_integrity_c *ic = ti->private;
1431        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1432        struct bio_integrity_payload *bip;
1433
1434        sector_t area, offset;
1435
1436        dio->ic = ic;
1437        dio->bi_status = 0;
1438
1439        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1440                submit_flush_bio(ic, dio);
1441                return DM_MAPIO_SUBMITTED;
1442        }
1443
1444        dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1445        dio->write = bio_op(bio) == REQ_OP_WRITE;
1446        dio->fua = dio->write && bio->bi_opf & REQ_FUA;
1447        if (unlikely(dio->fua)) {
1448                /*
1449                 * Don't pass down the FUA flag because we have to flush
1450                 * the disk cache anyway.
1451                 */
1452                bio->bi_opf &= ~REQ_FUA;
1453        }
1454        if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1455                DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1456                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
1457                      (unsigned long long)ic->provided_data_sectors);
1458                return DM_MAPIO_KILL;
1459        }
1460        if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1461                DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1462                      ic->sectors_per_block,
1463                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
1464                return DM_MAPIO_KILL;
1465        }
1466
1467        if (ic->sectors_per_block > 1) {
1468                struct bvec_iter iter;
1469                struct bio_vec bv;
1470                bio_for_each_segment(bv, bio, iter) {
1471                        if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1472                                DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1473                                        bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1474                                return DM_MAPIO_KILL;
1475                        }
1476                }
1477        }
1478
1479        bip = bio_integrity(bio);
1480        if (!ic->internal_hash) {
1481                if (bip) {
1482                        unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1483                        if (ic->log2_tag_size >= 0)
1484                                wanted_tag_size <<= ic->log2_tag_size;
1485                        else
1486                                wanted_tag_size *= ic->tag_size;
1487                        if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1488                                DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
1489                                return DM_MAPIO_KILL;
1490                        }
1491                }
1492        } else {
1493                if (unlikely(bip != NULL)) {
1494                        DMERR("Unexpected integrity data when using internal hash");
1495                        return DM_MAPIO_KILL;
1496                }
1497        }
1498
1499        if (unlikely(ic->mode == 'R') && unlikely(dio->write))
1500                return DM_MAPIO_KILL;
1501
1502        get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1503        dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1504        bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1505
1506        dm_integrity_map_continue(dio, true);
1507        return DM_MAPIO_SUBMITTED;
1508}
1509
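/*
 * Copy bio data to or from the in-memory journal, starting at the given
 * journal section and entry. For writes, data and its checksum/tag are
 * stored in the journal and the entry's sector number is published last,
 * marking the entry valid; for reads, data is copied out of the journal,
 * waiting first for entries that are still being written. Returns true
 * if the bio extends past the allocated journal entries, in which case
 * the caller must retake the lock and continue with the remainder.
 */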
1510static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1511                                 unsigned journal_section, unsigned journal_entry)
1512{
1513        struct dm_integrity_c *ic = dio->ic;
1514        sector_t logical_sector;
1515        unsigned n_sectors;
1516
1517        logical_sector = dio->range.logical_sector;
1518        n_sectors = dio->range.n_sectors;
1519        do {
1520                struct bio_vec bv = bio_iovec(bio);
1521                char *mem;
1522
1523                if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1524                        bv.bv_len = n_sectors << SECTOR_SHIFT;
1525                n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1526                bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1527retry_kmap:
1528                mem = kmap_atomic(bv.bv_page);
1529                if (likely(dio->write))
1530                        flush_dcache_page(bv.bv_page);
1531
1532                do {
1533                        struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1534
1535                        if (unlikely(!dio->write)) {
1536                                struct journal_sector *js;
1537                                char *mem_ptr;
1538                                unsigned s;
1539
1540                                if (unlikely(journal_entry_is_inprogress(je))) {
1541                                        flush_dcache_page(bv.bv_page);
1542                                        kunmap_atomic(mem);
1543
1544                                        __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1545                                        goto retry_kmap;
1546                                }
1547                                smp_rmb();
1548                                BUG_ON(journal_entry_get_sector(je) != logical_sector);
1549                                js = access_journal_data(ic, journal_section, journal_entry);
1550                                mem_ptr = mem + bv.bv_offset;
1551                                s = 0;
1552                                do {
1553                                        memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1554                                        *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1555                                        js++;
1556                                        mem_ptr += 1 << SECTOR_SHIFT;
1557                                } while (++s < ic->sectors_per_block);
1558#ifdef INTERNAL_VERIFY
1559                                if (ic->internal_hash) {
1560                                        char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1561
1562                                        integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1563                                        if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1564                                                DMERR("Checksum failed when reading from journal, at sector 0x%llx",
1565                                                      (unsigned long long)logical_sector);
1566                                        }
1567                                }
1568#endif
1569                        }
1570
1571                        if (!ic->internal_hash) {
1572                                struct bio_integrity_payload *bip = bio_integrity(bio);
1573                                unsigned tag_todo = ic->tag_size;
1574                                char *tag_ptr = journal_entry_tag(ic, je);
1575
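                                /*
                                 * Careful reading: this is
                                 * "if (bip) do { ... } while (...); else { ... }" -
                                 * the else below pairs with the if, not with the loop.
                                 */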
1576                                if (bip) do {
1577                                        struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1578                                        unsigned tag_now = min(biv.bv_len, tag_todo);
1579                                        char *tag_addr;
1580                                        BUG_ON(PageHighMem(biv.bv_page));
1581                                        tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1582                                        if (likely(dio->write))
1583                                                memcpy(tag_ptr, tag_addr, tag_now);
1584                                        else
1585                                                memcpy(tag_addr, tag_ptr, tag_now);
1586                                        bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
1587                                        tag_ptr += tag_now;
1588                                        tag_todo -= tag_now;
1589                                } while (unlikely(tag_todo)); else {
1590                                        if (likely(dio->write))
1591                                                memset(tag_ptr, 0, tag_todo);
1592                                }
1593                        }
1594
1595                        if (likely(dio->write)) {
1596                                struct journal_sector *js;
1597                                unsigned s;
1598
1599                                js = access_journal_data(ic, journal_section, journal_entry);
1600                                memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1601
1602                                s = 0;
1603                                do {
1604                                        je->last_bytes[s] = js[s].commit_id;
1605                                } while (++s < ic->sectors_per_block);
1606
1607                                if (ic->internal_hash) {
1608                                        unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1609                                        if (unlikely(digest_size > ic->tag_size)) {
1610                                                char checksums_onstack[HASH_MAX_DIGESTSIZE];
1611                                                integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1612                                                memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1613                                        } else
1614                                                integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1615                                }
1616
1617                                journal_entry_set_sector(je, logical_sector);
1618                        }
1619                        logical_sector += ic->sectors_per_block;
1620
1621                        journal_entry++;
1622                        if (unlikely(journal_entry == ic->journal_section_entries)) {
1623                                journal_entry = 0;
1624                                journal_section++;
1625                                wraparound_section(ic, &journal_section);
1626                        }
1627
1628                        bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1629                } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1630
1631                if (unlikely(!dio->write))
1632                        flush_dcache_page(bv.bv_page);
1633                kunmap_atomic(mem);
1634        } while (n_sectors);
1635
1636        if (likely(dio->write)) {
1637                smp_mb();
1638                if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1639                        wake_up(&ic->copy_to_journal_wait);
1640                if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1641                        queue_work(ic->commit_wq, &ic->commit_work);
1642                } else {
1643                        schedule_autocommit(ic);
1644                }
1645        } else {
1646                remove_range(ic, &dio->range);
1647        }
1648
1649        if (unlikely(bio->bi_iter.bi_size)) {
1650                sector_t area, offset;
1651
1652                dio->range.logical_sector = logical_sector;
1653                get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1654                dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1655                return true;
1656        }
1657
1658        return false;
1659}
1660
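/*
 * Main I/O processing. In journal mode ('J'), writes are allocated space
 * in the journal and reads that hit journalled sectors are served from
 * the journal, both via __journal_read_write(). Other I/O takes a range
 * lock and is submitted to the underlying device, with tags handled by
 * integrity_metadata(). Reads that must be verified with the internal
 * hash are synchronous, so when called from the map routine (where we
 * may not sleep) they are offloaded to a workqueue first.
 */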
1661static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1662{
1663        struct dm_integrity_c *ic = dio->ic;
1664        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1665        unsigned journal_section, journal_entry;
1666        unsigned journal_read_pos;
1667        struct completion read_comp;
1668        bool need_sync_io = ic->internal_hash && !dio->write;
1669
1670        if (need_sync_io && from_map) {
1671                INIT_WORK(&dio->work, integrity_bio_wait);
1672                queue_work(ic->metadata_wq, &dio->work);
1673                return;
1674        }
1675
1676lock_retry:
1677        spin_lock_irq(&ic->endio_wait.lock);
1678retry:
1679        if (unlikely(dm_integrity_failed(ic))) {
1680                spin_unlock_irq(&ic->endio_wait.lock);
1681                do_endio(ic, bio);
1682                return;
1683        }
1684        dio->range.n_sectors = bio_sectors(bio);
1685        journal_read_pos = NOT_FOUND;
1686        if (likely(ic->mode == 'J')) {
1687                if (dio->write) {
1688                        unsigned next_entry, i, pos;
1689                        unsigned ws, we, range_sectors;
1690
1691                        dio->range.n_sectors = min(dio->range.n_sectors,
1692                                                   ic->free_sectors << ic->sb->log2_sectors_per_block);
1693                        if (unlikely(!dio->range.n_sectors)) {
1694                                if (from_map)
1695                                        goto offload_to_thread;
1696                                sleep_on_endio_wait(ic);
1697                                goto retry;
1698                        }
1699                        range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1700                        ic->free_sectors -= range_sectors;
1701                        journal_section = ic->free_section;
1702                        journal_entry = ic->free_section_entry;
1703
1704                        next_entry = ic->free_section_entry + range_sectors;
1705                        ic->free_section_entry = next_entry % ic->journal_section_entries;
1706                        ic->free_section += next_entry / ic->journal_section_entries;
1707                        ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
1708                        wraparound_section(ic, &ic->free_section);
1709
1710                        pos = journal_section * ic->journal_section_entries + journal_entry;
1711                        ws = journal_section;
1712                        we = journal_entry;
1713                        i = 0;
1714                        do {
1715                                struct journal_entry *je;
1716
1717                                add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
1718                                pos++;
1719                                if (unlikely(pos >= ic->journal_entries))
1720                                        pos = 0;
1721
1722                                je = access_journal_entry(ic, ws, we);
1723                                BUG_ON(!journal_entry_is_unused(je));
1724                                journal_entry_set_inprogress(je);
1725                                we++;
1726                                if (unlikely(we == ic->journal_section_entries)) {
1727                                        we = 0;
1728                                        ws++;
1729                                        wraparound_section(ic, &ws);
1730                                }
1731                        } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
1732
1733                        spin_unlock_irq(&ic->endio_wait.lock);
1734                        goto journal_read_write;
1735                } else {
1736                        sector_t next_sector;
1737                        journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1738                        if (likely(journal_read_pos == NOT_FOUND)) {
1739                                if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
1740                                        dio->range.n_sectors = next_sector - dio->range.logical_sector;
1741                        } else {
1742                                unsigned i;
1743                                unsigned jp = journal_read_pos + 1;
1744                                for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
1745                                        if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
1746                                                break;
1747                                }
1748                                dio->range.n_sectors = i;
1749                        }
1750                }
1751        }
1752        if (unlikely(!add_new_range(ic, &dio->range, true))) {
1753                /*
1754                 * We must not sleep in the request routine because it could
1755                 * stall bios on current->bio_list.
1756                 * So, we offload the bio to a workqueue if we have to sleep.
1757                 */
1758                if (from_map) {
1759offload_to_thread:
1760                        spin_unlock_irq(&ic->endio_wait.lock);
1761                        INIT_WORK(&dio->work, integrity_bio_wait);
1762                        queue_work(ic->wait_wq, &dio->work);
1763                        return;
1764                }
1765                wait_and_add_new_range(ic, &dio->range);
1766        }
1767        spin_unlock_irq(&ic->endio_wait.lock);
1768
1769        if (unlikely(journal_read_pos != NOT_FOUND)) {
1770                journal_section = journal_read_pos / ic->journal_section_entries;
1771                journal_entry = journal_read_pos % ic->journal_section_entries;
1772                goto journal_read_write;
1773        }
1774
1775        dio->in_flight = (atomic_t)ATOMIC_INIT(2);
1776
1777        if (need_sync_io) {
1778                init_completion(&read_comp);
1779                dio->completion = &read_comp;
1780        } else
1781                dio->completion = NULL;
1782
1783        dio->orig_bi_iter = bio->bi_iter;
1784
1785        dio->orig_bi_disk = bio->bi_disk;
1786        dio->orig_bi_partno = bio->bi_partno;
1787        bio_set_dev(bio, ic->dev->bdev);
1788
1789        dio->orig_bi_integrity = bio_integrity(bio);
1790        bio->bi_integrity = NULL;
1791        bio->bi_opf &= ~REQ_INTEGRITY;
1792
1793        dio->orig_bi_end_io = bio->bi_end_io;
1794        bio->bi_end_io = integrity_end_io;
1795
1796        bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
1797        generic_make_request(bio);
1798
1799        if (need_sync_io) {
1800                wait_for_completion_io(&read_comp);
1801                if (unlikely(ic->recalc_wq != NULL) &&
1802                    ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
1803                    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
1804                        goto skip_check;
1805                if (likely(!bio->bi_status))
1806                        integrity_metadata(&dio->work);
1807                else
1808skip_check:
1809                        dec_in_flight(dio);
1810
1811        } else {
1812                INIT_WORK(&dio->work, integrity_metadata);
1813                queue_work(ic->metadata_wq, &dio->work);
1814        }
1815
1816        return;
1817
1818journal_read_write:
1819        if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
1820                goto lock_retry;
1821
1822        do_endio_flush(ic, dio);
1823}
1824
1825
1826static void integrity_bio_wait(struct work_struct *w)
1827{
1828        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1829
1830        dm_integrity_map_continue(dio, false);
1831}
1832
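/*
 * If the current free section is partially used, skip the rest of it so
 * that commits always cover whole sections; the skipped journal entries
 * are subtracted from the free space. The WARN_ON checks the accounting
 * invariant between free, uncommitted and committed sections.
 */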
1833static void pad_uncommitted(struct dm_integrity_c *ic)
1834{
1835        if (ic->free_section_entry) {
1836                ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
1837                ic->free_section_entry = 0;
1838                ic->free_section++;
1839                wraparound_section(ic, &ic->free_section);
1840                ic->n_uncommitted_sections++;
1841        }
1842        WARN_ON(ic->journal_sections * ic->journal_section_entries !=
1843                (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
1844}
1845
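/*
 * Commit work: push uncommitted journal sections to disk. Waits for any
 * entries still being copied into the in-memory journal, stamps every
 * journal sector with the current commit id, then writes the sections
 * with write_journal(). Afterwards the sections are accounted as
 * committed and the writer is kicked if free journal space is low.
 * Pending flush bios are completed at the end.
 */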
1846static void integrity_commit(struct work_struct *w)
1847{
1848        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
1849        unsigned commit_start, commit_sections;
1850        unsigned i, j, n;
1851        struct bio *flushes;
1852
1853        del_timer(&ic->autocommit_timer);
1854
1855        spin_lock_irq(&ic->endio_wait.lock);
1856        flushes = bio_list_get(&ic->flush_bio_list);
1857        if (unlikely(ic->mode != 'J')) {
1858                spin_unlock_irq(&ic->endio_wait.lock);
1859                dm_integrity_flush_buffers(ic);
1860                goto release_flush_bios;
1861        }
1862
1863        pad_uncommitted(ic);
1864        commit_start = ic->uncommitted_section;
1865        commit_sections = ic->n_uncommitted_sections;
1866        spin_unlock_irq(&ic->endio_wait.lock);
1867
1868        if (!commit_sections)
1869                goto release_flush_bios;
1870
1871        i = commit_start;
1872        for (n = 0; n < commit_sections; n++) {
1873                for (j = 0; j < ic->journal_section_entries; j++) {
1874                        struct journal_entry *je;
1875                        je = access_journal_entry(ic, i, j);
1876                        io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1877                }
1878                for (j = 0; j < ic->journal_section_sectors; j++) {
1879                        struct journal_sector *js;
1880                        js = access_journal(ic, i, j);
1881                        js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
1882                }
1883                i++;
1884                if (unlikely(i >= ic->journal_sections))
1885                        ic->commit_seq = next_commit_seq(ic->commit_seq);
1886                wraparound_section(ic, &i);
1887        }
1888        smp_rmb();
1889
1890        write_journal(ic, commit_start, commit_sections);
1891
1892        spin_lock_irq(&ic->endio_wait.lock);
1893        ic->uncommitted_section += commit_sections;
1894        wraparound_section(ic, &ic->uncommitted_section);
1895        ic->n_uncommitted_sections -= commit_sections;
1896        ic->n_committed_sections += commit_sections;
1897        spin_unlock_irq(&ic->endio_wait.lock);
1898
1899        if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
1900                queue_work(ic->writer_wq, &ic->writer_work);
1901
1902release_flush_bios:
1903        while (flushes) {
1904                struct bio *next = flushes->bi_next;
1905                flushes->bi_next = NULL;
1906                do_endio(ic, flushes);
1907                flushes = next;
1908        }
1909}
1910
1911static void complete_copy_from_journal(unsigned long error, void *context)
1912{
1913        struct journal_io *io = context;
1914        struct journal_completion *comp = io->comp;
1915        struct dm_integrity_c *ic = comp->ic;
1916        remove_range(ic, &io->range);
1917        mempool_free(io, &ic->journal_io_mempool);
1918        if (unlikely(error != 0))
1919                dm_integrity_io_error(ic, "copying from journal", -EIO);
1920        complete_journal_op(comp);
1921}
1922
1923static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
1924                               struct journal_entry *je)
1925{
1926        unsigned s = 0;
1927        do {
1928                js->commit_id = je->last_bytes[s];
1929                js++;
1930        } while (++s < ic->sectors_per_block);
1931}
1932
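/*
 * Write a range of committed journal sections back to their final
 * location on the data device. Runs of consecutive entries that map to
 * adjacent sectors are coalesced into one copy; entries superseded by a
 * newer committed version of the same sector are dropped. Tags are
 * written to the metadata area before the data is copied. With
 * from_replay set (journal replay after a crash), section MACs and,
 * if an internal hash is used, the stored checksums are also verified.
 */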
1933static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
1934                             unsigned write_sections, bool from_replay)
1935{
1936        unsigned i, j, n;
1937        struct journal_completion comp;
1938        struct blk_plug plug;
1939
1940        blk_start_plug(&plug);
1941
1942        comp.ic = ic;
1943        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
1944        init_completion(&comp.comp);
1945
1946        i = write_start;
1947        for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
1948#ifndef INTERNAL_VERIFY
1949                if (unlikely(from_replay))
1950#endif
1951                        rw_section_mac(ic, i, false);
1952                for (j = 0; j < ic->journal_section_entries; j++) {
1953                        struct journal_entry *je = access_journal_entry(ic, i, j);
1954                        sector_t sec, area, offset;
1955                        unsigned k, l, next_loop;
1956                        sector_t metadata_block;
1957                        unsigned metadata_offset;
1958                        struct journal_io *io;
1959
1960                        if (journal_entry_is_unused(je))
1961                                continue;
1962                        BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
1963                        sec = journal_entry_get_sector(je);
1964                        if (unlikely(from_replay)) {
1965                                if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
1966                                        dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
1967                                        sec &= ~(sector_t)(ic->sectors_per_block - 1);
1968                                }
1969                        }
1970                        get_area_and_offset(ic, sec, &area, &offset);
1971                        restore_last_bytes(ic, access_journal_data(ic, i, j), je);
1972                        for (k = j + 1; k < ic->journal_section_entries; k++) {
1973                                struct journal_entry *je2 = access_journal_entry(ic, i, k);
1974                                sector_t sec2, area2, offset2;
1975                                if (journal_entry_is_unused(je2))
1976                                        break;
1977                                BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
1978                                sec2 = journal_entry_get_sector(je2);
1979                                get_area_and_offset(ic, sec2, &area2, &offset2);
1980                                if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
1981                                        break;
1982                                restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
1983                        }
1984                        next_loop = k - 1;
1985
1986                        io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
1987                        io->comp = &comp;
1988                        io->range.logical_sector = sec;
1989                        io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
1990
1991                        spin_lock_irq(&ic->endio_wait.lock);
1992                        if (unlikely(!add_new_range(ic, &io->range, true)))
1993                                wait_and_add_new_range(ic, &io->range);
1994
1995                        if (likely(!from_replay)) {
1996                                struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
1997
1998                                /* don't write if there is a newer committed sector */
1999                                while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2000                                        struct journal_entry *je2 = access_journal_entry(ic, i, j);
2001
2002                                        journal_entry_set_unused(je2);
2003                                        remove_journal_node(ic, &section_node[j]);
2004                                        j++;
2005                                        sec += ic->sectors_per_block;
2006                                        offset += ic->sectors_per_block;
2007                                }
2008                                while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2009                                        struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2010
2011                                        journal_entry_set_unused(je2);
2012                                        remove_journal_node(ic, &section_node[k - 1]);
2013                                        k--;
2014                                }
2015                                if (j == k) {
2016                                        remove_range_unlocked(ic, &io->range);
2017                                        spin_unlock_irq(&ic->endio_wait.lock);
2018                                        mempool_free(io, &ic->journal_io_mempool);
2019                                        goto skip_io;
2020                                }
2021                                for (l = j; l < k; l++) {
2022                                        remove_journal_node(ic, &section_node[l]);
2023                                }
2024                        }
2025                        spin_unlock_irq(&ic->endio_wait.lock);
2026
2027                        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2028                        for (l = j; l < k; l++) {
2029                                int r;
2030                                struct journal_entry *je2 = access_journal_entry(ic, i, l);
2031
2032                                if (
2033#ifndef INTERNAL_VERIFY
2034                                    unlikely(from_replay) &&
2035#endif
2036                                    ic->internal_hash) {
2037                                        char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2038
2039                                        integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2040                                                                  (char *)access_journal_data(ic, i, l), test_tag);
2041                                        if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2042                                                dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2043                                }
2044
2045                                journal_entry_set_unused(je2);
2046                                r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2047                                                        ic->tag_size, TAG_WRITE);
2048                                if (unlikely(r)) {
2049                                        dm_integrity_io_error(ic, "writing tags", r);
2050                                }
2051                        }
2052
2053                        atomic_inc(&comp.in_flight);
2054                        copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2055                                          (k - j) << ic->sb->log2_sectors_per_block,
2056                                          get_data_sector(ic, area, offset),
2057                                          complete_copy_from_journal, io);
2058skip_io:
2059                        j = next_loop;
2060                }
2061        }
2062
2063        dm_bufio_write_dirty_buffers_async(ic->bufio);
2064
2065        blk_finish_plug(&plug);
2066
2067        complete_journal_op(&comp);
2068        wait_for_completion_io(&comp.comp);
2069
2070        dm_integrity_flush_buffers(ic);
2071}
2072
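/*
 * Writer work: flush committed journal sections to the data device via
 * do_journal_write() and return their entries to the free pool, waking
 * any bios that were waiting for journal space.
 */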
2073static void integrity_writer(struct work_struct *w)
2074{
2075        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2076        unsigned write_start, write_sections;
2077
2078        unsigned prev_free_sectors;
2079
2080        /* This test is not needed for correctness, but leaving the journal unwritten across suspend exercises the replay code. */
2081        if (READ_ONCE(ic->suspending) && !ic->meta_dev)
2082                return;
2083
2084        spin_lock_irq(&ic->endio_wait.lock);
2085        write_start = ic->committed_section;
2086        write_sections = ic->n_committed_sections;
2087        spin_unlock_irq(&ic->endio_wait.lock);
2088
2089        if (!write_sections)
2090                return;
2091
2092        do_journal_write(ic, write_start, write_sections, false);
2093
2094        spin_lock_irq(&ic->endio_wait.lock);
2095
2096        ic->committed_section += write_sections;
2097        wraparound_section(ic, &ic->committed_section);
2098        ic->n_committed_sections -= write_sections;
2099
2100        prev_free_sectors = ic->free_sectors;
2101        ic->free_sectors += write_sections * ic->journal_section_entries;
2102        if (unlikely(!prev_free_sectors))
2103                wake_up_locked(&ic->endio_wait);
2104
2105        spin_unlock_irq(&ic->endio_wait.lock);
2106}
2107
2108static void recalc_write_super(struct dm_integrity_c *ic)
2109{
2110        int r;
2111
2112        dm_integrity_flush_buffers(ic);
2113        if (dm_integrity_failed(ic))
2114                return;
2115
2116        sb_set_version(ic);
2117        r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2118        if (unlikely(r))
2119                dm_integrity_io_error(ic, "writing superblock", r);
2120}
2121
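/*
 * Background tag recalculation (the "recalculate" feature). Works in
 * chunks of up to RECALC_SECTORS: each chunk is range-locked, read from
 * the data device, checksummed into recalc_tags, and the tags are then
 * written to the metadata area. Progress is kept in sb->recalc_sector;
 * the superblock is persisted every RECALC_WRITE_SUPER chunks and once
 * more when the end of the device is reached.
 */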
2122static void integrity_recalc(struct work_struct *w)
2123{
2124        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2125        struct dm_integrity_range range;
2126        struct dm_io_request io_req;
2127        struct dm_io_region io_loc;
2128        sector_t area, offset;
2129        sector_t metadata_block;
2130        unsigned metadata_offset;
2131        __u8 *t;
2132        unsigned i;
2133        int r;
2134        unsigned super_counter = 0;
2135
2136        spin_lock_irq(&ic->endio_wait.lock);
2137
2138next_chunk:
2139
2140        if (unlikely(READ_ONCE(ic->suspending)))
2141                goto unlock_ret;
2142
2143        range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2144        if (unlikely(range.logical_sector >= ic->provided_data_sectors))
2145                goto unlock_ret;
2146
2147        get_area_and_offset(ic, range.logical_sector, &area, &offset);
2148        range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2149        if (!ic->meta_dev)
2150                range.n_sectors = min(range.n_sectors, (1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2151
2152        if (unlikely(!add_new_range(ic, &range, true)))
2153                wait_and_add_new_range(ic, &range);
2154
2155        spin_unlock_irq(&ic->endio_wait.lock);
2156
2157        if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2158                recalc_write_super(ic);
2159                super_counter = 0;
2160        }
2161
2162        if (unlikely(dm_integrity_failed(ic)))
2163                goto err;
2164
2165        io_req.bi_op = REQ_OP_READ;
2166        io_req.bi_op_flags = 0;
2167        io_req.mem.type = DM_IO_VMA;
2168        io_req.mem.ptr.addr = ic->recalc_buffer;
2169        io_req.notify.fn = NULL;
2170        io_req.client = ic->io;
2171        io_loc.bdev = ic->dev->bdev;
2172        io_loc.sector = get_data_sector(ic, area, offset);
2173        io_loc.count = range.n_sectors;
2174
2175        r = dm_io(&io_req, 1, &io_loc, NULL);
2176        if (unlikely(r)) {
2177                dm_integrity_io_error(ic, "reading data", r);
2178                goto err;
2179        }
2180
2181        t = ic->recalc_tags;
2182        for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
2183                integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2184                t += ic->tag_size;
2185        }
2186
2187        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2188
2189        r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2190        if (unlikely(r)) {
2191                dm_integrity_io_error(ic, "writing tags", r);
2192                goto err;
2193        }
2194
2195        spin_lock_irq(&ic->endio_wait.lock);
2196        remove_range_unlocked(ic, &range);
2197        ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2198        goto next_chunk;
2199
2200err:
2201        remove_range(ic, &range);
2202        return;
2203
2204unlock_ret:
2205        spin_unlock_irq(&ic->endio_wait.lock);
2206
2207        recalc_write_super(ic);
2208}
2209
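/*
 * Erase a range of journal sections: zero the entry data, stamp each
 * journal sector with the commit id derived from commit_seq, mark all
 * entries unused and write the sections to disk.
 */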
2210static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2211                         unsigned n_sections, unsigned char commit_seq)
2212{
2213        unsigned i, j, n;
2214
2215        if (!n_sections)
2216                return;
2217
2218        for (n = 0; n < n_sections; n++) {
2219                i = start_section + n;
2220                wraparound_section(ic, &i);
2221                for (j = 0; j < ic->journal_section_sectors; j++) {
2222                        struct journal_sector *js = access_journal(ic, i, j);
2223                        memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2224                        js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2225                }
2226                for (j = 0; j < ic->journal_section_entries; j++) {
2227                        struct journal_entry *je = access_journal_entry(ic, i, j);
2228                        journal_entry_set_unused(je);
2229                }
2230        }
2231
2232        write_journal(ic, start_section, n_sections);
2233}
2234
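/*
 * Map an on-disk commit id at position (i, j) back to its commit
 * sequence number; reports an I/O error and returns -EIO if the id
 * matches none of the N_COMMIT_IDS possible values.
 */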
2235static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2236{
2237        unsigned char k;
2238        for (k = 0; k < N_COMMIT_IDS; k++) {
2239                if (dm_integrity_commit_id(ic, i, j, k) == id)
2240                        return k;
2241        }
2242        dm_integrity_io_error(ic, "journal commit id", -EIO);
2243        return -EIO;
2244}
2245
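/*
 * Read the on-disk journal (decrypting it if journal encryption is
 * configured), use the per-sector commit ids to find the sections that
 * were committed but possibly not written back, and replay them with
 * do_journal_write(). If the commit ids are inconsistent the journal is
 * reinitialized instead. Finally, the in-memory journal bookkeeping and
 * the journal tree are reset.
 */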
2246static void replay_journal(struct dm_integrity_c *ic)
2247{
2248        unsigned i, j;
2249        bool used_commit_ids[N_COMMIT_IDS];
2250        unsigned max_commit_id_sections[N_COMMIT_IDS];
2251        unsigned write_start, write_sections;
2252        unsigned continue_section;
2253        bool journal_empty;
2254        unsigned char unused, last_used, want_commit_seq;
2255
2256        if (ic->mode == 'R')
2257                return;
2258
2259        if (ic->journal_uptodate)
2260                return;
2261
2262        last_used = 0;
2263        write_start = 0;
2264
2265        if (!ic->just_formatted) {
2266                DEBUG_print("reading journal\n");
2267                rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2268                if (ic->journal_io)
2269                        DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2270                if (ic->journal_io) {
2271                        struct journal_completion crypt_comp;
2272                        crypt_comp.ic = ic;
2273                        init_completion(&crypt_comp.comp);
2274                        crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2275                        encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2276                        wait_for_completion(&crypt_comp.comp);
2277                }
2278                DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2279        }
2280
2281        if (dm_integrity_failed(ic))
2282                goto clear_journal;
2283
2284        journal_empty = true;
2285        memset(used_commit_ids, 0, sizeof used_commit_ids);
2286        memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2287        for (i = 0; i < ic->journal_sections; i++) {
2288                for (j = 0; j < ic->journal_section_sectors; j++) {
2289                        int k;
2290                        struct journal_sector *js = access_journal(ic, i, j);
2291                        k = find_commit_seq(ic, i, j, js->commit_id);
2292                        if (k < 0)
2293                                goto clear_journal;
2294                        used_commit_ids[k] = true;
2295                        max_commit_id_sections[k] = i;
2296                }
2297                if (journal_empty) {
2298                        for (j = 0; j < ic->journal_section_entries; j++) {
2299                                struct journal_entry *je = access_journal_entry(ic, i, j);
2300                                if (!journal_entry_is_unused(je)) {
2301                                        journal_empty = false;
2302                                        break;
2303                                }
2304                        }
2305                }
2306        }
2307
2308        if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2309                unused = N_COMMIT_IDS - 1;
2310                while (unused && !used_commit_ids[unused - 1])
2311                        unused--;
2312        } else {
2313                for (unused = 0; unused < N_COMMIT_IDS; unused++)
2314                        if (!used_commit_ids[unused])
2315                                break;
2316                if (unused == N_COMMIT_IDS) {
2317                        dm_integrity_io_error(ic, "journal commit ids", -EIO);
2318                        goto clear_journal;
2319                }
2320        }
2321        DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2322                    unused, used_commit_ids[0], used_commit_ids[1],
2323                    used_commit_ids[2], used_commit_ids[3]);
2324
2325        last_used = prev_commit_seq(unused);
2326        want_commit_seq = prev_commit_seq(last_used);
2327
2328        if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2329                journal_empty = true;
2330
2331        write_start = max_commit_id_sections[last_used] + 1;
2332        if (unlikely(write_start >= ic->journal_sections))
2333                want_commit_seq = next_commit_seq(want_commit_seq);
2334        wraparound_section(ic, &write_start);
2335
2336        i = write_start;
2337        for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2338                for (j = 0; j < ic->journal_section_sectors; j++) {
2339                        struct journal_sector *js = access_journal(ic, i, j);
2340
2341                        if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2342                                /*
2343                                 * This could be caused by a crash during writing.
2344                                 * We won't replay the inconsistent part of the
2345                                 * journal.
2346                                 */
2347                                DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2348                                            i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2349                                goto brk;
2350                        }
2351                }
2352                i++;
2353                if (unlikely(i >= ic->journal_sections))
2354                        want_commit_seq = next_commit_seq(want_commit_seq);
2355                wraparound_section(ic, &i);
2356        }
2357brk:
2358
2359        if (!journal_empty) {
2360                DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2361                            write_sections, write_start, want_commit_seq);
2362                do_journal_write(ic, write_start, write_sections, true);
2363        }
2364
2365        if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2366                continue_section = write_start;
2367                ic->commit_seq = want_commit_seq;
2368                DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2369        } else {
2370                unsigned s;
2371                unsigned char erase_seq;
2372clear_journal:
2373                DEBUG_print("clearing journal\n");
2374
2375                erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2376                s = write_start;
2377                init_journal(ic, s, 1, erase_seq);
2378                s++;
2379                wraparound_section(ic, &s);
2380                if (ic->journal_sections >= 2) {
2381                        init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2382                        s += ic->journal_sections - 2;
2383                        wraparound_section(ic, &s);
2384                        init_journal(ic, s, 1, erase_seq);
2385                }
2386
2387                continue_section = 0;
2388                ic->commit_seq = next_commit_seq(erase_seq);
2389        }
2390
2391        ic->committed_section = continue_section;
2392        ic->n_committed_sections = 0;
2393
2394        ic->uncommitted_section = continue_section;
2395        ic->n_uncommitted_sections = 0;
2396
2397        ic->free_section = continue_section;
2398        ic->free_section_entry = 0;
2399        ic->free_sectors = ic->journal_entries;
2400
2401        ic->journal_tree_root = RB_ROOT;
2402        for (i = 0; i < ic->journal_entries; i++)
2403                init_journal_node(&ic->journal_tree[i]);
2404}
2405
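/*
 * Postsuspend: stop the autocommit timer and drain the recalculation,
 * commit and (in journal mode) writer workqueues, flushing buffered
 * metadata, so that nothing is in flight when the device is suspended.
 */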
2406static void dm_integrity_postsuspend(struct dm_target *ti)
2407{
2408        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2409
2410        del_timer_sync(&ic->autocommit_timer);
2411
2412        WRITE_ONCE(ic->suspending, 1);
2413
2414        if (ic->recalc_wq)
2415                drain_workqueue(ic->recalc_wq);
2416
2417        queue_work(ic->commit_wq, &ic->commit_work);
2418        drain_workqueue(ic->commit_wq);
2419
2420        if (ic->mode == 'J') {
2421                if (ic->meta_dev)
2422                        queue_work(ic->writer_wq, &ic->writer_work);
2423                drain_workqueue(ic->writer_wq);
2424                dm_integrity_flush_buffers(ic);
2425        }
2426
2427        WRITE_ONCE(ic->suspending, 0);
2428
2429        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2430
2431        ic->journal_uptodate = true;
2432}
2433
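/*
 * Resume: replay the journal, then restart recalculation if it was
 * interrupted. A recalc position beyond the provided data sectors
 * (possible if the device was reduced) is clamped and written back.
 */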
2434static void dm_integrity_resume(struct dm_target *ti)
2435{
2436        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2437
2438        replay_journal(ic);
2439
2440        if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2441                __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
2442                if (recalc_pos < ic->provided_data_sectors) {
2443                        queue_work(ic->recalc_wq, &ic->recalc_work);
2444                } else if (recalc_pos > ic->provided_data_sectors) {
2445                        ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
2446                        recalc_write_super(ic);
2447                }
2448        }
2449}
2450
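/*
 * Status callback. STATUSTYPE_INFO reports the mismatch count, the
 * provided data sectors and, while recalculating, the current position.
 * STATUSTYPE_TABLE reconstructs the constructor arguments, converting
 * the internal free-sectors threshold back to a journal_watermark
 * percentage.
 */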
2451static void dm_integrity_status(struct dm_target *ti, status_type_t type,
2452                                unsigned status_flags, char *result, unsigned maxlen)
2453{
2454        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2455        unsigned arg_count;
2456        size_t sz = 0;
2457
2458        switch (type) {
2459        case STATUSTYPE_INFO:
2460                DMEMIT("%llu %llu",
2461                        (unsigned long long)atomic64_read(&ic->number_of_mismatches),
2462                        (unsigned long long)ic->provided_data_sectors);
2463                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
2464                        DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
2465                else
2466                        DMEMIT(" -");
2467                break;
2468
2469        case STATUSTYPE_TABLE: {
2470                __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
2471                watermark_percentage += ic->journal_entries / 2;
2472                do_div(watermark_percentage, ic->journal_entries);
2473                arg_count = 5;
2474                arg_count += !!ic->meta_dev;
2475                arg_count += ic->sectors_per_block != 1;
2476                arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
2477                arg_count += !!ic->internal_hash_alg.alg_string;
2478                arg_count += !!ic->journal_crypt_alg.alg_string;
2479                arg_count += !!ic->journal_mac_alg.alg_string;
2480                DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
2481                       ic->tag_size, ic->mode, arg_count);
2482                if (ic->meta_dev)
2483                        DMEMIT(" meta_device:%s", ic->meta_dev->name);
2484                if (ic->sectors_per_block != 1)
2485                        DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
2486                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
2487                        DMEMIT(" recalculate");
2488                DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
2489                DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
2490                DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
2491                DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
2492                DMEMIT(" commit_time:%u", ic->autocommit_msec);
2493
2494#define EMIT_ALG(a, n)                                                  \
2495                do {                                                    \
2496                        if (ic->a.alg_string) {                         \
2497                                DMEMIT(" %s:%s", n, ic->a.alg_string);  \
2498                                if (ic->a.key_string)                   \
2499                                        DMEMIT(":%s", ic->a.key_string);\
2500                        }                                               \
2501                } while (0)
2502                EMIT_ALG(internal_hash_alg, "internal_hash");
2503                EMIT_ALG(journal_crypt_alg, "journal_crypt");
2504                EMIT_ALG(journal_mac_alg, "journal_mac");
2505                break;
2506        }
2507        }
2508}
2509
2510static int dm_integrity_iterate_devices(struct dm_target *ti,
2511                                        iterate_devices_callout_fn fn, void *data)
2512{
2513        struct dm_integrity_c *ic = ti->private;
2514
2515        if (!ic->meta_dev)
2516                return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
2517        else
2518                return fn(ti, ic->dev, 0, ti->len, data);
2519}
2520
2521static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
2522{
2523        struct dm_integrity_c *ic = ti->private;
2524
2525        if (ic->sectors_per_block > 1) {
2526                limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2527                limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2528                blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
2529        }
2530}
2531
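/*
 * Derive the journal geometry from the superblock: the size of one
 * journal entry (sector number, last_bytes and tag, rounded up to
 * JOURNAL_ENTRY_ROUNDUP), how many entries fit in a journal sector
 * after reserving room for the optional per-sector MAC, and from these
 * the entries and sectors per section and the total entry count.
 */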
2532static void calculate_journal_section_size(struct dm_integrity_c *ic)
2533{
2534        unsigned sector_space = JOURNAL_SECTOR_DATA;
2535
2536        ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
2537        ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
2538                                         JOURNAL_ENTRY_ROUNDUP);
2539
2540        if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
2541                sector_space -= JOURNAL_MAC_PER_SECTOR;
2542        ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
2543        ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
2544        ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
2545        ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
2546}
2547
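/*
 * Compute initial_sectors (superblock plus journal) and the metadata
 * layout, checking that everything fits on the device. In interleaved
 * mode, metadata_run is the per-area metadata size in sectors (a power
 * of two allows the offset math to use shifts); with a separate
 * metadata device the metadata is one contiguous region sized from
 * provided_data_sectors and the tag size.
 */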
2548static int calculate_device_limits(struct dm_integrity_c *ic)
2549{
2550        __u64 initial_sectors;
2551
2552        calculate_journal_section_size(ic);
2553        initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
2554        if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
2555                return -EINVAL;
2556        ic->initial_sectors = initial_sectors;
2557
2558        if (!ic->meta_dev) {
2559                sector_t last_sector, last_area, last_offset;
2560
2561                ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
2562                                           (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
2563                if (!(ic->metadata_run & (ic->metadata_run - 1)))
2564                        ic->log2_metadata_run = __ffs(ic->metadata_run);
2565                else
2566                        ic->log2_metadata_run = -1;
2567
2568                get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
2569                last_sector = get_data_sector(ic, last_area, last_offset);
2570                if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
2571                        return -EINVAL;
2572        } else {
2573                __u64 meta_size = ic->provided_data_sectors * ic->tag_size;
2574                meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
2575                                >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
2576                meta_size <<= ic->log2_buffer_sectors;
2577                if (ic->initial_sectors + meta_size < ic->initial_sectors ||
2578                    ic->initial_sectors + meta_size > ic->meta_device_sectors)
2579                        return -EINVAL;
2580                ic->metadata_run = 1;
2581                ic->log2_metadata_run = 0;
2582        }
2583
2584        return 0;
2585}
2586
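/*
 * Format a fresh superblock. Without a separate metadata device the
 * interleaved layout is used and provided_data_sectors is maximized bit
 * by bit, testing each candidate with calculate_device_limits(). With a
 * separate metadata device the whole data device is provided and the
 * journal size is maximized the same way, shrinking the buffer size if
 * not even one journal section fits.
 */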
2587static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
2588{
2589        unsigned journal_sections;
2590        int test_bit;
2591
2592        memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
2593        memcpy(ic->sb->magic, SB_MAGIC, 8);
2594        ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
2595        ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
2596        if (ic->journal_mac_alg.alg_string)
2597                ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
2598
2599        calculate_journal_section_size(ic);
2600        journal_sections = journal_sectors / ic->journal_section_sectors;
2601        if (!journal_sections)
2602                journal_sections = 1;
2603
2604        if (!ic->meta_dev) {
2605                ic->sb->journal_sections = cpu_to_le32(journal_sections);
2606                if (!interleave_sectors)
2607                        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
2608                ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
2609                ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2610                ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2611
2612                ic->provided_data_sectors = 0;
2613                for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
2614                        __u64 prev_data_sectors = ic->provided_data_sectors;
2615
2616                        ic->provided_data_sectors |= (sector_t)1 << test_bit;
2617                        if (calculate_device_limits(ic))
2618                                ic->provided_data_sectors = prev_data_sectors;
2619                }
2620                if (!ic->provided_data_sectors)
2621                        return -EINVAL;
2622        } else {
2623                ic->sb->log2_interleave_sectors = 0;
2624                ic->provided_data_sectors = ic->data_device_sectors;
2625                ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
2626
2627try_smaller_buffer:
2628                ic->sb->journal_sections = cpu_to_le32(0);
2629                for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
2630                        __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
2631                        __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
2632                        if (test_journal_sections > journal_sections)
2633                                continue;
2634                        ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
2635                        if (calculate_device_limits(ic))
2636                                ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
2638                }
2639                if (!le32_to_cpu(ic->sb->journal_sections)) {
2640                        if (ic->log2_buffer_sectors > 3) {
2641                                ic->log2_buffer_sectors--;
2642                                goto try_smaller_buffer;
2643                        }
2644                        return -EINVAL;
2645                }
2646        }
2647
2648        ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
2649
2650        sb_set_version(ic);
2651
2652        return 0;
2653}
2654
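    /*
     * Register a blk_integrity profile for the dm device so that bios can
     * carry ic->tag_size bytes of integrity payload per data block. This is
     * only called when no internal hash is used, i.e. when the tags are
     * supplied by the upper layer.
     */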
2655static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
2656{
2657        struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
2658        struct blk_integrity bi;
2659
2660        memset(&bi, 0, sizeof(bi));
2661        bi.profile = &dm_integrity_profile;
2662        bi.tuple_size = ic->tag_size;
2663        bi.tag_size = bi.tuple_size;
2664        bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
2665
2666        blk_integrity_register(disk, &bi);
2667        blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
2668}
2669
2670static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
2671{
2672        unsigned i;
2673
2674        if (!pl)
2675                return;
2676        for (i = 0; i < ic->journal_pages; i++)
2677                if (pl[i].page)
2678                        __free_page(pl[i].page);
2679        kvfree(pl);
2680}
2681
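    /*
     * Allocate ic->journal_pages pages chained into a page_list, the format
     * consumed by dm-io. On any later failure the list is released with
     * dm_integrity_free_page_list() above.
     */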
2682static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
2683{
2684        size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
2685        struct page_list *pl;
2686        unsigned i;
2687
2688        pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
2689        if (!pl)
2690                return NULL;
2691
2692        for (i = 0; i < ic->journal_pages; i++) {
2693                pl[i].page = alloc_page(GFP_KERNEL);
2694                if (!pl[i].page) {
2695                        dm_integrity_free_page_list(ic, pl);
2696                        return NULL;
2697                }
2698                if (i)
2699                        pl[i - 1].next = &pl[i];
2700        }
2701
2702        return pl;
2703}
2704
2705static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
2706{
2707        unsigned i;
2708        for (i = 0; i < ic->journal_sections; i++)
2709                kvfree(sl[i]);
2710        kvfree(sl);
2711}
2712
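    /*
     * Build one scatterlist per journal section, mapping the section's
     * sectors onto the page_list pages. A section may begin and end in the
     * middle of a page, so the first and last entries may cover only part
     * of a page.
     */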
2713static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
2714{
2715        struct scatterlist **sl;
2716        unsigned i;
2717
2718        sl = kvmalloc_array(ic->journal_sections,
2719                            sizeof(struct scatterlist *),
2720                            GFP_KERNEL | __GFP_ZERO);
2721        if (!sl)
2722                return NULL;
2723
2724        for (i = 0; i < ic->journal_sections; i++) {
2725                struct scatterlist *s;
2726                unsigned start_index, start_offset;
2727                unsigned end_index, end_offset;
2728                unsigned n_pages;
2729                unsigned idx;
2730
2731                page_list_location(ic, i, 0, &start_index, &start_offset);
2732                page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);
2733
2734                n_pages = (end_index - start_index + 1);
2735
2736                s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
2737                                   GFP_KERNEL);
2738                if (!s) {
2739                        dm_integrity_free_journal_scatterlist(ic, sl);
2740                        return NULL;
2741                }
2742
2743                sg_init_table(s, n_pages);
2744                for (idx = start_index; idx <= end_index; idx++) {
2745                        char *va = lowmem_page_address(pl[idx].page);
2746                        unsigned start = 0, end = PAGE_SIZE;
2747                        if (idx == start_index)
2748                                start = start_offset;
2749                        if (idx == end_index)
2750                                end = end_offset + (1 << SECTOR_SHIFT);
2751                        sg_set_buf(&s[idx - start_index], va + start, end - start);
2752                }
2753
2754                sl[i] = s;
2755        }
2756
2757        return sl;
2758}
2759
2760static void free_alg(struct alg_spec *a)
2761{
2762        kzfree(a->alg_string);
2763        kzfree(a->key);
2764        memset(a, 0, sizeof *a);
2765}
2766
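    /*
     * Parse an "option:algorithm[:hexkey]" argument into an alg_spec. The
     * key, when present, must consist of an even number of hex digits and
     * is decoded into a->key.
     */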
2767static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
2768{
2769        char *k;
2770
2771        free_alg(a);
2772
2773        a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
2774        if (!a->alg_string)
2775                goto nomem;
2776
2777        k = strchr(a->alg_string, ':');
2778        if (k) {
2779                *k = 0;
2780                a->key_string = k + 1;
2781                if (strlen(a->key_string) & 1)
2782                        goto inval;
2783
2784                a->key_size = strlen(a->key_string) / 2;
2785                a->key = kmalloc(a->key_size, GFP_KERNEL);
2786                if (!a->key)
2787                        goto nomem;
2788                if (hex2bin(a->key, a->key_string, a->key_size))
2789                        goto inval;
2790        }
2791
2792        return 0;
2793inval:
2794        *error = error_inval;
2795        return -EINVAL;
2796nomem:
2797        *error = "Out of memory for an argument";
2798        return -ENOMEM;
2799}
2800
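    /*
     * Allocate the shash transform described by an alg_spec and set its
     * key. Omitting the key is an error if the algorithm requires one
     * (CRYPTO_TFM_NEED_KEY).
     */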
2801static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
2802                   char *error_alg, char *error_key)
2803{
2804        int r;
2805
2806        if (a->alg_string) {
2807                *hash = crypto_alloc_shash(a->alg_string, 0, 0);
2808                if (IS_ERR(*hash)) {
2809                        *error = error_alg;
2810                        r = PTR_ERR(*hash);
2811                        *hash = NULL;
2812                        return r;
2813                }
2814
2815                if (a->key) {
2816                        r = crypto_shash_setkey(*hash, a->key, a->key_size);
2817                        if (r) {
2818                                *error = error_key;
2819                                return r;
2820                        }
2821                } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
2822                        *error = error_key;
2823                        return -ENOKEY;
2824                }
2825        }
2826
2827        return 0;
2828}
2829
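    /*
     * Allocate the in-memory journal and set up the optional journal
     * encryption. A stream cipher (block size 1) is handled by encrypting
     * an all-zero journal image once, producing a keystream in
     * ic->journal_xor; the transform is then freed. A block cipher instead
     * keeps one preallocated skcipher request per section in
     * ic->sk_requests, each with an IV derived from the section number.
     */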
2830static int create_journal(struct dm_integrity_c *ic, char **error)
2831{
2832        int r = 0;
2833        unsigned i;
2834        __u64 journal_pages, journal_desc_size, journal_tree_size;
2835        unsigned char *crypt_data = NULL, *crypt_iv = NULL;
2836        struct skcipher_request *req = NULL;
2837
2838        ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2839        ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
2840        ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
2841        ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
2842
2843        journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
2844                                PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
2845        journal_desc_size = journal_pages * sizeof(struct page_list);
2846        if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
2847                *error = "Journal doesn't fit into memory";
2848                r = -ENOMEM;
2849                goto bad;
2850        }
2851        ic->journal_pages = journal_pages;
2852
2853        ic->journal = dm_integrity_alloc_page_list(ic);
2854        if (!ic->journal) {
2855                *error = "Could not allocate memory for journal";
2856                r = -ENOMEM;
2857                goto bad;
2858        }
2859        if (ic->journal_crypt_alg.alg_string) {
2860                unsigned ivsize, blocksize;
2861                struct journal_completion comp;
2862
2863                comp.ic = ic;
2864                ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
2865                if (IS_ERR(ic->journal_crypt)) {
2866                        *error = "Invalid journal cipher";
2867                        r = PTR_ERR(ic->journal_crypt);
2868                        ic->journal_crypt = NULL;
2869                        goto bad;
2870                }
2871                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
2872                blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
2873
2874                if (ic->journal_crypt_alg.key) {
2875                        r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
2876                                                   ic->journal_crypt_alg.key_size);
2877                        if (r) {
2878                                *error = "Error setting encryption key";
2879                                goto bad;
2880                        }
2881                }
2882                DEBUG_print("cipher %s, block size %u, iv size %u\n",
2883                            ic->journal_crypt_alg.alg_string, blocksize, ivsize);
2884
2885                ic->journal_io = dm_integrity_alloc_page_list(ic);
2886                if (!ic->journal_io) {
2887                        *error = "Could not allocate memory for journal io";
2888                        r = -ENOMEM;
2889                        goto bad;
2890                }
2891
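                    /*
                     * blocksize == 1 means a stream cipher: the keystream is
                     * precomputed here and later XORed over journal data, so
                     * the transform itself is not needed at runtime. Block
                     * ciphers take the else branch below.
                     */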
2892                if (blocksize == 1) {
2893                        struct scatterlist *sg;
2894
2895                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2896                        if (!req) {
2897                                *error = "Could not allocate crypt request";
2898                                r = -ENOMEM;
2899                                goto bad;
2900                        }
2901
2902                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2903                        if (!crypt_iv) {
2904                                *error = "Could not allocate iv";
2905                                r = -ENOMEM;
2906                                goto bad;
2907                        }
2908
2909                        ic->journal_xor = dm_integrity_alloc_page_list(ic);
2910                        if (!ic->journal_xor) {
2911                                *error = "Could not allocate memory for journal xor";
2912                                r = -ENOMEM;
2913                                goto bad;
2914                        }
2915
2916                        sg = kvmalloc_array(ic->journal_pages + 1,
2917                                            sizeof(struct scatterlist),
2918                                            GFP_KERNEL);
2919                        if (!sg) {
2920                                *error = "Unable to allocate sg list";
2921                                r = -ENOMEM;
2922                                goto bad;
2923                        }
2924                        sg_init_table(sg, ic->journal_pages + 1);
2925                        for (i = 0; i < ic->journal_pages; i++) {
2926                                char *va = lowmem_page_address(ic->journal_xor[i].page);
2927                                clear_page(va);
2928                                sg_set_buf(&sg[i], va, PAGE_SIZE);
2929                        }
2930                        sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
2931                        memset(crypt_iv, 0x00, ivsize);
2932
2933                        skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
2934                        init_completion(&comp.comp);
2935                        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2936                        if (do_crypt(true, req, &comp))
2937                                wait_for_completion(&comp.comp);
2938                        kvfree(sg);
2939                        r = dm_integrity_failed(ic);
2940                        if (r) {
2941                                *error = "Unable to encrypt journal";
2942                                goto bad;
2943                        }
2944                        DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
2945
2946                        crypto_free_skcipher(ic->journal_crypt);
2947                        ic->journal_crypt = NULL;
2948                } else {
2949                        unsigned crypt_len = roundup(ivsize, blocksize);
2950
2951                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2952                        if (!req) {
2953                                *error = "Could not allocate crypt request";
2954                                r = -ENOMEM;
2955                                goto bad;
2956                        }
2957
2958                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2959                        if (!crypt_iv) {
2960                                *error = "Could not allocate iv";
2961                                r = -ENOMEM;
2962                                goto bad;
2963                        }
2964
2965                        crypt_data = kmalloc(crypt_len, GFP_KERNEL);
2966                        if (!crypt_data) {
2967                                *error = "Unable to allocate crypt data";
2968                                r = -ENOMEM;
2969                                goto bad;
2970                        }
2971
2972                        ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2973                        if (!ic->journal_scatterlist) {
2974                                *error = "Unable to allocate sg list";
2975                                r = -ENOMEM;
2976                                goto bad;
2977                        }
2978                        ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
2979                        if (!ic->journal_io_scatterlist) {
2980                                *error = "Unable to allocate sg list";
2981                                r = -ENOMEM;
2982                                goto bad;
2983                        }
2984                        ic->sk_requests = kvmalloc_array(ic->journal_sections,
2985                                                         sizeof(struct skcipher_request *),
2986                                                         GFP_KERNEL | __GFP_ZERO);
2987                        if (!ic->sk_requests) {
2988                                *error = "Unable to allocate sk requests";
2989                                r = -ENOMEM;
2990                                goto bad;
2991                        }
2992                        for (i = 0; i < ic->journal_sections; i++) {
2993                                struct scatterlist sg;
2994                                struct skcipher_request *section_req;
2995                                __le32 section_le = cpu_to_le32(i);
2996
2997                                memset(crypt_iv, 0x00, ivsize);
2998                                memset(crypt_data, 0x00, crypt_len);
2999                                memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3000
3001                                sg_init_one(&sg, crypt_data, crypt_len);
3002                                skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3003                                init_completion(&comp.comp);
3004                                comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3005                                if (do_crypt(true, req, &comp))
3006                                        wait_for_completion(&comp.comp);
3007
3008                                r = dm_integrity_failed(ic);
3009                                if (r) {
3010                                        *error = "Unable to generate iv";
3011                                        goto bad;
3012                                }
3013
3014                                section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3015                                if (!section_req) {
3016                                        *error = "Unable to allocate crypt request";
3017                                        r = -ENOMEM;
3018                                        goto bad;
3019                                }
3020                                section_req->iv = kmalloc_array(ivsize, 2,
3021                                                                GFP_KERNEL);
3022                                if (!section_req->iv) {
3023                                        skcipher_request_free(section_req);
3024                                        *error = "Unable to allocate iv";
3025                                        r = -ENOMEM;
3026                                        goto bad;
3027                                }
3028                                memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3029                                section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3030                                ic->sk_requests[i] = section_req;
3031                                DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3032                        }
3033                }
3034        }
3035
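            /*
             * Make the commit IDs pairwise distinct; duplicate IDs would
             * make it ambiguous which journal section was committed last.
             */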
3036        for (i = 0; i < N_COMMIT_IDS; i++) {
3037                unsigned j;
3038retest_commit_id:
3039                for (j = 0; j < i; j++) {
3040                        if (ic->commit_ids[j] == ic->commit_ids[i]) {
3041                                ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3042                                goto retest_commit_id;
3043                        }
3044                }
3045                DEBUG_print("commit id %u: %016llx\n", i, le64_to_cpu(ic->commit_ids[i]));
3046        }
3047
3048        journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3049        if (journal_tree_size > ULONG_MAX) {
3050                *error = "Journal doesn't fit into memory";
3051                r = -ENOMEM;
3052                goto bad;
3053        }
3054        ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3055        if (!ic->journal_tree) {
3056                *error = "Could not allocate memory for journal tree";
3057                r = -ENOMEM;
3058        }
3059bad:
3060        kfree(crypt_data);
3061        kfree(crypt_iv);
3062        skcipher_request_free(req);
3063
3064        return r;
3065}
3066
3067/*
3068 * Construct an integrity mapping
3069 *
3070 * Arguments:
3071 *      device
3072 *      offset from the start of the device
3073 *      tag size
3074 *      D - direct writes, J - journal writes, R - recovery mode
3075 *      number of optional arguments
3076 *      optional arguments:
3077 *              journal_sectors
3078 *              interleave_sectors
3079 *              buffer_sectors
3080 *              journal_watermark
3081 *              commit_time
3082 *              internal_hash
3083 *              journal_crypt
3084 *              journal_mac
3085 *              block_size
     *              meta_device
     *              recalculate
3086 */
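    /*
     * For illustration only (not part of this file): a dmsetup table line
     * for this target might look like
     *
     *   0 409600 integrity /dev/sdb 0 32 J 2 journal_sectors:1024 internal_hash:sha256
     *
     * i.e. map 409600 sectors of /dev/sdb starting at offset 0, with
     * 32-byte tags, journaled writes and two feature arguments. The device
     * name and sizes here are hypothetical.
     */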
3087static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3088{
3089        struct dm_integrity_c *ic;
3090        char dummy;
3091        int r;
3092        unsigned extra_args;
3093        struct dm_arg_set as;
3094        static const struct dm_arg _args[] = {
3095                {0, 9, "Invalid number of feature args"},
3096        };
3097        unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3098        bool recalculate;
3099        bool should_write_sb;
3100        __u64 threshold;
3101        unsigned long long start;
3102
3103#define DIRECT_ARGUMENTS        4
3104
3105        if (argc <= DIRECT_ARGUMENTS) {
3106                ti->error = "Invalid argument count";
3107                return -EINVAL;
3108        }
3109
3110        ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3111        if (!ic) {
3112                ti->error = "Cannot allocate integrity context";
3113                return -ENOMEM;
3114        }
3115        ti->private = ic;
3116        ti->per_io_data_size = sizeof(struct dm_integrity_io);
3117
3118        ic->in_progress = RB_ROOT;
3119        INIT_LIST_HEAD(&ic->wait_list);
3120        init_waitqueue_head(&ic->endio_wait);
3121        bio_list_init(&ic->flush_bio_list);
3122        init_waitqueue_head(&ic->copy_to_journal_wait);
3123        init_completion(&ic->crypto_backoff);
3124        atomic64_set(&ic->number_of_mismatches, 0);
3125
3126        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3127        if (r) {
3128                ti->error = "Device lookup failed";
3129                goto bad;
3130        }
3131
3132        if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3133                ti->error = "Invalid starting offset";
3134                r = -EINVAL;
3135                goto bad;
3136        }
3137        ic->start = start;
3138
3139        if (strcmp(argv[2], "-")) {
3140                if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3141                        ti->error = "Invalid tag size";
3142                        r = -EINVAL;
3143                        goto bad;
3144                }
3145        }
3146
3147        if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
3148                ic->mode = argv[3][0];
3149        else {
3150                ti->error = "Invalid mode (expecting J, D, R)";
3151                r = -EINVAL;
3152                goto bad;
3153        }
3154
3155        journal_sectors = 0;
3156        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3157        buffer_sectors = DEFAULT_BUFFER_SECTORS;
3158        journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3159        sync_msec = DEFAULT_SYNC_MSEC;
3160        recalculate = false;
3161        ic->sectors_per_block = 1;
3162
3163        as.argc = argc - DIRECT_ARGUMENTS;
3164        as.argv = argv + DIRECT_ARGUMENTS;
3165        r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
3166        if (r)
3167                goto bad;
3168
3169        while (extra_args--) {
3170                const char *opt_string;
3171                unsigned val;
3172                opt_string = dm_shift_arg(&as);
3173                if (!opt_string) {
3174                        r = -EINVAL;
3175                        ti->error = "Not enough feature arguments";
3176                        goto bad;
3177                }
3178                if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
3179                        journal_sectors = val ? val : 1;
3180                else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
3181                        interleave_sectors = val;
3182                else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
3183                        buffer_sectors = val;
3184                else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
3185                        journal_watermark = val;
3186                else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3187                        sync_msec = val;
3188                else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3189                        if (ic->meta_dev) {
3190                                dm_put_device(ti, ic->meta_dev);
3191                                ic->meta_dev = NULL;
3192                        }
3193                        r = dm_get_device(ti, strchr(opt_string, ':') + 1, dm_table_get_mode(ti->table), &ic->meta_dev);
3194                        if (r) {
3195                                ti->error = "Device lookup failed";
3196                                goto bad;
3197                        }
3198                } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
3199                        if (val < 1 << SECTOR_SHIFT ||
3200                            val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
3201                            (val & (val - 1))) {
3202                                r = -EINVAL;
3203                                ti->error = "Invalid block_size argument";
3204                                goto bad;
3205                        }
3206                        ic->sectors_per_block = val >> SECTOR_SHIFT;
3207                } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3208                        r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3209                                            "Invalid internal_hash argument");
3210                        if (r)
3211                                goto bad;
3212                } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3213                        r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3214                                            "Invalid journal_crypt argument");
3215                        if (r)
3216                                goto bad;
3217                } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3218                        r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
3219                                            "Invalid journal_mac argument");
3220                        if (r)
3221                                goto bad;
3222                } else if (!strcmp(opt_string, "recalculate")) {
3223                        recalculate = true;
3224                } else {
3225                        r = -EINVAL;
3226                        ti->error = "Invalid argument";
3227                        goto bad;
3228                }
3229        }
3230
3231        ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3232        if (!ic->meta_dev)
3233                ic->meta_device_sectors = ic->data_device_sectors;
3234        else
3235                ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3236
3237        if (!journal_sectors) {
3238                journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
3239                        ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
3240        }
3241
3242        if (!buffer_sectors)
3243                buffer_sectors = 1;
3244        ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
3245
3246        r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3247                    "Invalid internal hash", "Error setting internal hash key");
3248        if (r)
3249                goto bad;
3250
3251        r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
3252                    "Invalid journal mac", "Error setting journal mac key");
3253        if (r)
3254                goto bad;
3255
3256        if (!ic->tag_size) {
3257                if (!ic->internal_hash) {
3258                        ti->error = "Unknown tag size";
3259                        r = -EINVAL;
3260                        goto bad;
3261                }
3262                ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
3263        }
3264        if (ic->tag_size > MAX_TAG_SIZE) {
3265                ti->error = "Too big tag size";
3266                r = -EINVAL;
3267                goto bad;
3268        }
3269        if (!(ic->tag_size & (ic->tag_size - 1)))
3270                ic->log2_tag_size = __ffs(ic->tag_size);
3271        else
3272                ic->log2_tag_size = -1;
3273
3274        ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
3275        ic->autocommit_msec = sync_msec;
3276        timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
3277
3278        ic->io = dm_io_client_create();
3279        if (IS_ERR(ic->io)) {
3280                r = PTR_ERR(ic->io);
3281                ic->io = NULL;
3282                ti->error = "Cannot allocate dm io";
3283                goto bad;
3284        }
3285
3286        r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
3287        if (r) {
3288                ti->error = "Cannot allocate mempool";
3289                goto bad;
3290        }
3291
3292        ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
3293                                          WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
3294        if (!ic->metadata_wq) {
3295                ti->error = "Cannot allocate workqueue";
3296                r = -ENOMEM;
3297                goto bad;
3298        }
3299
3300        /*
3301         * If this workqueue were percpu, it would cause bio reordering
3302         * and reduced performance.
3303         */
3304        ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
3305        if (!ic->wait_wq) {
3306                ti->error = "Cannot allocate workqueue";
3307                r = -ENOMEM;
3308                goto bad;
3309        }
3310
3311        ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
3312        if (!ic->commit_wq) {
3313                ti->error = "Cannot allocate workqueue";
3314                r = -ENOMEM;
3315                goto bad;
3316        }
3317        INIT_WORK(&ic->commit_work, integrity_commit);
3318
3319        if (ic->mode == 'J') {
3320                ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
3321                if (!ic->writer_wq) {
3322                        ti->error = "Cannot allocate workqueue";
3323                        r = -ENOMEM;
3324                        goto bad;
3325                }
3326                INIT_WORK(&ic->writer_work, integrity_writer);
3327        }
3328
3329        ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
3330        if (!ic->sb) {
3331                r = -ENOMEM;
3332                ti->error = "Cannot allocate superblock area";
3333                goto bad;
3334        }
3335
3336        r = sync_rw_sb(ic, REQ_OP_READ, 0);
3337        if (r) {
3338                ti->error = "Error reading superblock";
3339                goto bad;
3340        }
3341        should_write_sb = false;
3342        if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
3343                if (ic->mode != 'R') {
3344                        if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
3345                                r = -EINVAL;
3346                                ti->error = "The device is not initialized";
3347                                goto bad;
3348                        }
3349                }
3350
3351                r = initialize_superblock(ic, journal_sectors, interleave_sectors);
3352                if (r) {
3353                        ti->error = "Could not initialize superblock";
3354                        goto bad;
3355                }
3356                if (ic->mode != 'R')
3357                        should_write_sb = true;
3358        }
3359
3360        if (!ic->sb->version || ic->sb->version > SB_VERSION_2) {
3361                r = -EINVAL;
3362                ti->error = "Unknown version";
3363                goto bad;
3364        }
3365        if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
3366                r = -EINVAL;
3367                ti->error = "Tag size doesn't match the information in superblock";
3368                goto bad;
3369        }
3370        if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
3371                r = -EINVAL;
3372                ti->error = "Block size doesn't match the information in superblock";
3373                goto bad;
3374        }
3375        if (!le32_to_cpu(ic->sb->journal_sections)) {
3376                r = -EINVAL;
3377                ti->error = "Corrupted superblock, journal_sections is 0";
3378                goto bad;
3379        }
3380        /* make sure that ti->max_io_len doesn't overflow */
3381        if (!ic->meta_dev) {
3382                if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
3383                    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
3384                        r = -EINVAL;
3385                        ti->error = "Invalid interleave_sectors in the superblock";
3386                        goto bad;
3387                }
3388        } else {
3389                if (ic->sb->log2_interleave_sectors) {
3390                        r = -EINVAL;
3391                        ti->error = "Invalid interleave_sectors in the superblock";
3392                        goto bad;
3393                }
3394        }
3395        ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3396        if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
3397                /* test for overflow */
3398                r = -EINVAL;
3399                ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
3400                goto bad;
3401        }
3402        if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
3403                r = -EINVAL;
3404                ti->error = "Journal mac mismatch";
3405                goto bad;
3406        }
3407
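    /*
     * With a separate metadata device the metadata area may not fit on the
     * first attempt; retry with progressively smaller bufio blocks, down to
     * a minimum of 1 << 3 sectors, before giving up.
     */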
3408try_smaller_buffer:
3409        r = calculate_device_limits(ic);
3410        if (r) {
3411                if (ic->meta_dev) {
3412                        if (ic->log2_buffer_sectors > 3) {
3413                                ic->log2_buffer_sectors--;
3414                                goto try_smaller_buffer;
3415                        }
3416                }
3417                ti->error = "The device is too small";
3418                goto bad;
3419        }
3420        if (!ic->meta_dev)
3421                ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
3422
3423        if (ti->len > ic->provided_data_sectors) {
3424                r = -EINVAL;
3425                ti->error = "Not enough provided sectors for requested mapping size";
3426                goto bad;
3427        }
3428
3430        threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
3431        threshold += 50;
3432        do_div(threshold, 100);
3433        ic->free_sectors_threshold = threshold;
3434
3435        DEBUG_print("initialized:\n");
3436        DEBUG_print("   integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
3437        DEBUG_print("   journal_entry_size %u\n", ic->journal_entry_size);
3438        DEBUG_print("   journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
3439        DEBUG_print("   journal_section_entries %u\n", ic->journal_section_entries);
3440        DEBUG_print("   journal_section_sectors %u\n", ic->journal_section_sectors);
3441        DEBUG_print("   journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
3442        DEBUG_print("   journal_entries %u\n", ic->journal_entries);
3443        DEBUG_print("   log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
3444        DEBUG_print("   device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
3445        DEBUG_print("   initial_sectors 0x%x\n", ic->initial_sectors);
3446        DEBUG_print("   metadata_run 0x%x\n", ic->metadata_run);
3447        DEBUG_print("   log2_metadata_run %d\n", ic->log2_metadata_run);
3448        DEBUG_print("   provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
3449                    (unsigned long long)ic->provided_data_sectors);
3450        DEBUG_print("   log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
3451
3452        if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
3453                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3454                ic->sb->recalc_sector = cpu_to_le64(0);
3455        }
3456
3457        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3458                if (!ic->internal_hash) {
3459                        r = -EINVAL;
3460                        ti->error = "Recalculate is only valid with internal hash";
3461                        goto bad;
3462                }
3463                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
3464                if (!ic->recalc_wq) {
3465                        ti->error = "Cannot allocate workqueue";
3466                        r = -ENOMEM;
3467                        goto bad;
3468                }
3469                INIT_WORK(&ic->recalc_work, integrity_recalc);
3470                ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
3471                if (!ic->recalc_buffer) {
3472                        ti->error = "Cannot allocate buffer for recalculating";
3473                        r = -ENOMEM;
3474                        goto bad;
3475                }
3476                ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
3477                                                 ic->tag_size, GFP_KERNEL);
3478                if (!ic->recalc_tags) {
3479                        ti->error = "Cannot allocate tags for recalculating";
3480                        r = -ENOMEM;
3481                        goto bad;
3482                }
3483        }
3484
3485        ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
3486                        1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
3487        if (IS_ERR(ic->bufio)) {
3488                r = PTR_ERR(ic->bufio);
3489                ti->error = "Cannot initialize dm-bufio";
3490                ic->bufio = NULL;
3491                goto bad;
3492        }
3493        dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
3494
3495        if (ic->mode != 'R') {
3496                r = create_journal(ic, &ti->error);
3497                if (r)
3498                        goto bad;
3499        }
3500
3501        if (should_write_sb) {
3504                init_journal(ic, 0, ic->journal_sections, 0);
3505                r = dm_integrity_failed(ic);
3506                if (unlikely(r)) {
3507                        ti->error = "Error initializing journal";
3508                        goto bad;
3509                }
3510                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3511                if (r) {
3512                        ti->error = "Error initializing superblock";
3513                        goto bad;
3514                }
3515                ic->just_formatted = true;
3516        }
3517
3518        if (!ic->meta_dev) {
3519                r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
3520                if (r)
3521                        goto bad;
3522        }
3523
3524        if (!ic->internal_hash)
3525                dm_integrity_set(ti, ic);
3526
3527        ti->num_flush_bios = 1;
3528        ti->flush_supported = true;
3529
3530        return 0;
3531bad:
3532        dm_integrity_dtr(ti);
3533        return r;
3534}
3535
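    /*
     * Destructor. This is also used to clean up after a failed ->ctr, so
     * every resource must be checked before it is released.
     */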
3536static void dm_integrity_dtr(struct dm_target *ti)
3537{
3538        struct dm_integrity_c *ic = ti->private;
3539
3540        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3541        BUG_ON(!list_empty(&ic->wait_list));
3542
3543        if (ic->metadata_wq)
3544                destroy_workqueue(ic->metadata_wq);
3545        if (ic->wait_wq)
3546                destroy_workqueue(ic->wait_wq);
3547        if (ic->commit_wq)
3548                destroy_workqueue(ic->commit_wq);
3549        if (ic->writer_wq)
3550                destroy_workqueue(ic->writer_wq);
3551        if (ic->recalc_wq)
3552                destroy_workqueue(ic->recalc_wq);
3553        if (ic->recalc_buffer)
3554                vfree(ic->recalc_buffer);
3555        if (ic->recalc_tags)
3556                kvfree(ic->recalc_tags);
3557        if (ic->bufio)
3558                dm_bufio_client_destroy(ic->bufio);
3559        mempool_exit(&ic->journal_io_mempool);
3560        if (ic->io)
3561                dm_io_client_destroy(ic->io);
3562        if (ic->dev)
3563                dm_put_device(ti, ic->dev);
3564        if (ic->meta_dev)
3565                dm_put_device(ti, ic->meta_dev);
3566        dm_integrity_free_page_list(ic, ic->journal);
3567        dm_integrity_free_page_list(ic, ic->journal_io);
3568        dm_integrity_free_page_list(ic, ic->journal_xor);
3569        if (ic->journal_scatterlist)
3570                dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
3571        if (ic->journal_io_scatterlist)
3572                dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
3573        if (ic->sk_requests) {
3574                unsigned i;
3575
3576                for (i = 0; i < ic->journal_sections; i++) {
3577                        struct skcipher_request *req = ic->sk_requests[i];
3578                        if (req) {
3579                                kzfree(req->iv);
3580                                skcipher_request_free(req);
3581                        }
3582                }
3583                kvfree(ic->sk_requests);
3584        }
3585        kvfree(ic->journal_tree);
3586        if (ic->sb)
3587                free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
3588
3589        if (ic->internal_hash)
3590                crypto_free_shash(ic->internal_hash);
3591        free_alg(&ic->internal_hash_alg);
3592
3593        if (ic->journal_crypt)
3594                crypto_free_skcipher(ic->journal_crypt);
3595        free_alg(&ic->journal_crypt_alg);
3596
3597        if (ic->journal_mac)
3598                crypto_free_shash(ic->journal_mac);
3599        free_alg(&ic->journal_mac_alg);
3600
3601        kfree(ic);
3602}
3603
3604static struct target_type integrity_target = {
3605        .name                   = "integrity",
3606        .version                = {1, 2, 0},
3607        .module                 = THIS_MODULE,
3608        .features               = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
3609        .ctr                    = dm_integrity_ctr,
3610        .dtr                    = dm_integrity_dtr,
3611        .map                    = dm_integrity_map,
3612        .postsuspend            = dm_integrity_postsuspend,
3613        .resume                 = dm_integrity_resume,
3614        .status                 = dm_integrity_status,
3615        .iterate_devices        = dm_integrity_iterate_devices,
3616        .io_hints               = dm_integrity_io_hints,
3617};
3618
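    /*
     * Module init: create the slab cache for journal I/O tracking
     * structures and register the "integrity" target type.
     */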
3619int __init dm_integrity_init(void)
3620{
3621        int r;
3622
3623        journal_io_cache = kmem_cache_create("integrity_journal_io",
3624                                             sizeof(struct journal_io), 0, 0, NULL);
3625        if (!journal_io_cache) {
3626                DMERR("can't allocate journal io cache");
3627                return -ENOMEM;
3628        }
3629
3630        r = dm_register_target(&integrity_target);
3631
3632        if (r < 0)
3633                DMERR("register failed %d", r);
3634
3635        return r;
3636}
3637
3638void dm_integrity_exit(void)
3639{
3640        dm_unregister_target(&integrity_target);
3641        kmem_cache_destroy(journal_io_cache);
3642}
3643
3644module_init(dm_integrity_init);
3645module_exit(dm_integrity_exit);
3646
3647MODULE_AUTHOR("Milan Broz");
3648MODULE_AUTHOR("Mikulas Patocka");
3649MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
3650MODULE_LICENSE("GPL");
3651