linux/drivers/md/dm-integrity.c
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS      32768
#define DEFAULT_JOURNAL_SIZE_FACTOR     7
#define DEFAULT_SECTORS_PER_BITMAP_BIT  32768
#define DEFAULT_BUFFER_SECTORS          128
#define DEFAULT_JOURNAL_WATERMARK       50
#define DEFAULT_SYNC_MSEC               10000
#define DEFAULT_MAX_JOURNAL_SECTORS     131072
#define MIN_LOG2_INTERLEAVE_SECTORS     3
#define MAX_LOG2_INTERLEAVE_SECTORS     31
#define METADATA_WORKQUEUE_MAX_ACTIVE   16
#define RECALC_SECTORS                  8192
#define RECALC_WRITE_SUPER              16
#define BITMAP_BLOCK_SIZE               4096    /* don't change it */
#define BITMAP_FLUSH_INTERVAL           (10 * HZ)
#define DISCARD_FILLER                  0xf6

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC                        "integrt"
#define SB_VERSION_1                    1
#define SB_VERSION_2                    2
#define SB_VERSION_3                    3
#define SB_VERSION_4                    4
#define SB_SECTORS                      8
#define MAX_SECTORS_PER_BLOCK           8

struct superblock {
        __u8 magic[8];
        __u8 version;
        __u8 log2_interleave_sectors;
        __u16 integrity_tag_size;
        __u32 journal_sections;
        __u64 provided_data_sectors;    /* userspace uses this value */
        __u32 flags;
        __u8 log2_sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;
        __u8 pad[2];
        __u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC        0x1
#define SB_FLAG_RECALCULATING           0x2
#define SB_FLAG_DIRTY_BITMAP            0x4
#define SB_FLAG_FIXED_PADDING           0x8

#define JOURNAL_ENTRY_ROUNDUP           8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR          8

struct journal_entry {
        union {
                struct {
                        __u32 sector_lo;
                        __u32 sector_hi;
                } s;
                __u64 sector;
        } u;
        commit_id_t last_bytes[];
        /* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)               ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)         do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)         do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)            le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)             ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)            do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)         ((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)        do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
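
/*
 * Note that sector_hi doubles as a state marker: cpu_to_le32(-1) marks
 * an unused entry and cpu_to_le32(-2) an in-progress one; real sector
 * numbers never reach these values in practice.  On 32-bit builds the
 * low half is stored first and smp_wmb() orders it before the high
 * half, so a reader that sees a valid sector_hi also sees the matching
 * sector_lo.
 */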

#define JOURNAL_BLOCK_SECTORS           8
#define JOURNAL_SECTOR_DATA             ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE                (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
        __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
        __u8 mac[JOURNAL_MAC_PER_SECTOR];
        commit_id_t commit_id;
};

#define MAX_TAG_SIZE                    (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS        8

#define N_COMMIT_IDS                    4

static unsigned char prev_commit_seq(unsigned char seq)
{
        return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
        return (seq + 1) % N_COMMIT_IDS;
}
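
/*
 * Commit IDs form a 4-entry ring stepped through with the two helpers
 * above.  Every journal sector carries the commit ID of the generation
 * it was written in, so that replay can detect where a wrapped journal
 * stops being valid.
 */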

/*
 * In-memory structures
 */

struct journal_node {
        struct rb_node node;
        sector_t sector;
};

struct alg_spec {
        char *alg_string;
        char *key_string;
        __u8 *key;
        unsigned key_size;
};

struct dm_integrity_c {
        struct dm_dev *dev;
        struct dm_dev *meta_dev;
        unsigned tag_size;
        __s8 log2_tag_size;
        sector_t start;
        mempool_t journal_io_mempool;
        struct dm_io_client *io;
        struct dm_bufio_client *bufio;
        struct workqueue_struct *metadata_wq;
        struct superblock *sb;
        unsigned journal_pages;
        unsigned n_bitmap_blocks;

        struct page_list *journal;
        struct page_list *journal_io;
        struct page_list *journal_xor;
        struct page_list *recalc_bitmap;
        struct page_list *may_write_bitmap;
        struct bitmap_block_status *bbs;
        unsigned bitmap_flush_interval;
        int synchronous_mode;
        struct bio_list synchronous_bios;
        struct delayed_work bitmap_flush_work;

        struct crypto_skcipher *journal_crypt;
        struct scatterlist **journal_scatterlist;
        struct scatterlist **journal_io_scatterlist;
        struct skcipher_request **sk_requests;

        struct crypto_shash *journal_mac;

        struct journal_node *journal_tree;
        struct rb_root journal_tree_root;

        sector_t provided_data_sectors;

        unsigned short journal_entry_size;
        unsigned char journal_entries_per_sector;
        unsigned char journal_section_entries;
        unsigned short journal_section_sectors;
        unsigned journal_sections;
        unsigned journal_entries;
        sector_t data_device_sectors;
        sector_t meta_device_sectors;
        unsigned initial_sectors;
        unsigned metadata_run;
        __s8 log2_metadata_run;
        __u8 log2_buffer_sectors;
        __u8 sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;

        unsigned char mode;

        int failed;

        struct crypto_shash *internal_hash;

        struct dm_target *ti;

        /* these variables are locked with endio_wait.lock */
        struct rb_root in_progress;
        struct list_head wait_list;
        wait_queue_head_t endio_wait;
        struct workqueue_struct *wait_wq;
        struct workqueue_struct *offload_wq;

        unsigned char commit_seq;
        commit_id_t commit_ids[N_COMMIT_IDS];

        unsigned committed_section;
        unsigned n_committed_sections;

        unsigned uncommitted_section;
        unsigned n_uncommitted_sections;

        unsigned free_section;
        unsigned char free_section_entry;
        unsigned free_sectors;

        unsigned free_sectors_threshold;

        struct workqueue_struct *commit_wq;
        struct work_struct commit_work;

        struct workqueue_struct *writer_wq;
        struct work_struct writer_work;

        struct workqueue_struct *recalc_wq;
        struct work_struct recalc_work;
        u8 *recalc_buffer;
        u8 *recalc_tags;

        struct bio_list flush_bio_list;

        unsigned long autocommit_jiffies;
        struct timer_list autocommit_timer;
        unsigned autocommit_msec;

        wait_queue_head_t copy_to_journal_wait;

        struct completion crypto_backoff;

        bool journal_uptodate;
        bool just_formatted;
        bool recalculate_flag;
        bool discard;
        bool fix_padding;
        bool legacy_recalculate;

        struct alg_spec internal_hash_alg;
        struct alg_spec journal_crypt_alg;
        struct alg_spec journal_mac_alg;

        atomic64_t number_of_mismatches;

        struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
        sector_t logical_sector;
        sector_t n_sectors;
        bool waiting;
        union {
                struct rb_node node;
                struct {
                        struct task_struct *task;
                        struct list_head wait_entry;
                };
        };
};

struct dm_integrity_io {
        struct work_struct work;

        struct dm_integrity_c *ic;
        enum req_opf op;
        bool fua;

        struct dm_integrity_range range;

        sector_t metadata_block;
        unsigned metadata_offset;

        atomic_t in_flight;
        blk_status_t bi_status;

        struct completion *completion;

        struct dm_bio_details bio_details;
};

struct journal_completion {
        struct dm_integrity_c *ic;
        atomic_t in_flight;
        struct completion comp;
};

struct journal_io {
        struct dm_integrity_range range;
        struct journal_completion *comp;
};

struct bitmap_block_status {
        struct work_struct work;
        struct dm_integrity_c *ic;
        unsigned idx;
        unsigned long *bitmap;
        struct bio_list bio_queue;
        spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL      32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)     printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
        va_list args;
        va_start(args, msg);
        vprintk(msg, args);
        va_end(args);
        if (len)
                pr_cont(":");
        while (len) {
                pr_cont(" %02x", *bytes);
                bytes++;
                len--;
        }
        pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)       __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)                     do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)       do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile; protection is performed a layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
        .name                   = "DM-DIF-EXT-TAG",
        .generate_fn            = NULL,
        .verify_fn              = NULL,
        .prepare_fn             = dm_integrity_prepare,
        .complete_fn            = dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
        if (err == -EILSEQ)
                atomic64_inc(&ic->number_of_mismatches);
        if (!cmpxchg(&ic->failed, 0, err))
                DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
        return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
        if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
            !ic->legacy_recalculate)
                return true;
        return false;
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
                                          unsigned j, unsigned char seq)
{
        /*
         * Xor the number with the section and sector, so that if a piece
         * of the journal is written at the wrong place, it is detected.
         */
        return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
                                sector_t *area, sector_t *offset)
{
        if (!ic->meta_dev) {
                __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
                *area = data_sector >> log2_interleave_sectors;
                *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
        } else {
                *area = 0;
                *offset = data_sector;
        }
}
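
/*
 * Example, assuming the default interleave of 32768 sectors: data
 * sector 100000 falls into area 3 (100000 >> 15) at offset 1696
 * (100000 & 32767).  With a separate metadata device there is a single
 * area and the offset is the data sector itself.
 */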

#define sector_to_block(ic, n)                                          \
do {                                                                    \
        BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));          \
        (n) >>= (ic)->sb->log2_sectors_per_block;                       \
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
                                            sector_t offset, unsigned *metadata_offset)
{
        __u64 ms;
        unsigned mo;

        ms = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                ms += area << ic->log2_metadata_run;
        else
                ms += area * ic->metadata_run;
        ms >>= ic->log2_buffer_sectors;

        sector_to_block(ic, offset);

        if (likely(ic->log2_tag_size >= 0)) {
                ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
                mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        } else {
                ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
                mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        }
        *metadata_offset = mo;
        return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
        sector_t result;

        if (ic->meta_dev)
                return offset;

        result = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                result += (area + 1) << ic->log2_metadata_run;
        else
                result += (area + 1) * ic->metadata_run;

        result += (sector_t)ic->initial_sectors + offset;
        result += ic->start;

        return result;
}
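
/*
 * Single-device layout: superblock and journal (ic->initial_sectors),
 * then alternating metadata runs and data areas.  The data of area N
 * sits behind N + 1 metadata runs, hence the (area + 1) factor above.
 */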

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
        if (unlikely(*sec_ptr >= ic->journal_sections))
                *sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
        if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
                ic->sb->version = SB_VERSION_4;
        else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
                ic->sb->version = SB_VERSION_3;
        else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
                ic->sb->version = SB_VERSION_2;
        else
                ic->sb->version = SB_VERSION_1;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_KMEM;
        io_req.mem.ptr.addr = ic->sb;
        io_req.notify.fn = NULL;
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start;
        io_loc.count = SB_SECTORS;

        if (op == REQ_OP_WRITE)
                sb_set_version(ic);

        return dm_io(&io_req, 1, &io_loc, NULL);
}

#define BITMAP_OP_TEST_ALL_SET          0
#define BITMAP_OP_TEST_ALL_CLEAR        1
#define BITMAP_OP_SET                   2
#define BITMAP_OP_CLEAR                 3

static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
                            sector_t sector, sector_t n_sectors, int mode)
{
        unsigned long bit, end_bit, this_end_bit, page, end_page;
        unsigned long *data;

        if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
                DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
                        sector,
                        n_sectors,
                        ic->sb->log2_sectors_per_block,
                        ic->log2_blocks_per_bitmap_bit,
                        mode);
                BUG();
        }

        if (unlikely(!n_sectors))
                return true;

        bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        end_bit = (sector + n_sectors - 1) >>
                (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

        page = bit / (PAGE_SIZE * 8);
        bit %= PAGE_SIZE * 8;

        end_page = end_bit / (PAGE_SIZE * 8);
        end_bit %= PAGE_SIZE * 8;

repeat:
        if (page < end_page) {
                this_end_bit = PAGE_SIZE * 8 - 1;
        } else {
                this_end_bit = end_bit;
        }

        data = lowmem_page_address(bitmap[page].page);

        if (mode == BITMAP_OP_TEST_ALL_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != -1)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (!test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != 0)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = -1;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __set_bit(bit, data);
                        bit++;
                }
        } else if (mode == BITMAP_OP_CLEAR) {
                if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
                        clear_page(data);
                else while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = 0;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __clear_bit(bit, data);
                        bit++;
                }
        } else {
                BUG();
        }

        if (unlikely(page < end_page)) {
                bit = 0;
                page++;
                goto repeat;
        }

        return true;
}
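
/*
 * The loops above process whole longs at a time where the bit range is
 * long-aligned and fall back to per-bit operations at the edges;
 * BITMAP_OP_CLEAR additionally short-circuits to clear_page() when a
 * full page is covered.
 */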

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
        unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
        unsigned i;

        for (i = 0; i < n_bitmap_pages; i++) {
                unsigned long *dst_data = lowmem_page_address(dst[i].page);
                unsigned long *src_data = lowmem_page_address(src[i].page);
                copy_page(dst_data, src_data);
        }
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
        unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

        BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
        return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                                 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
        unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

        if (unlikely(section >= ic->journal_sections) ||
            unlikely(offset >= limit)) {
                DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
                       function, section, offset, ic->journal_sections, limit);
                BUG();
        }
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                               unsigned *pl_index, unsigned *pl_offset)
{
        unsigned sector;

        access_journal_check(ic, section, offset, false, "page_list_location");

        sector = section * ic->journal_section_sectors + offset;

        *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
                                               unsigned section, unsigned offset, unsigned *n_sectors)
{
        unsigned pl_index, pl_offset;
        char *va;

        page_list_location(ic, section, offset, &pl_index, &pl_offset);

        if (n_sectors)
                *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

        va = lowmem_page_address(pl[pl_index].page);

        return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
        return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        unsigned rel_sector, offset;
        struct journal_sector *js;

        access_journal_check(ic, section, n, true, "access_journal_entry");

        rel_sector = n % JOURNAL_BLOCK_SECTORS;
        offset = n / JOURNAL_BLOCK_SECTORS;

        js = access_journal(ic, section, rel_sector);
        return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}
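
/*
 * Entries are interleaved across the section's first
 * JOURNAL_BLOCK_SECTORS sectors: entry n lives in sector
 * n % JOURNAL_BLOCK_SECTORS at slot n / JOURNAL_BLOCK_SECTORS,
 * followed by the section's data sectors.
 */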

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        n <<= ic->sb->log2_sectors_per_block;

        n += JOURNAL_BLOCK_SECTORS;

        access_journal_check(ic, section, n, false, "access_journal_data");

        return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
        SHASH_DESC_ON_STACK(desc, ic->journal_mac);
        int r;
        unsigned j, size;

        desc->tfm = ic->journal_mac;

        r = crypto_shash_init(desc);
        if (unlikely(r)) {
                dm_integrity_io_error(ic, "crypto_shash_init", r);
                goto err;
        }

        for (j = 0; j < ic->journal_section_entries; j++) {
                struct journal_entry *je = access_journal_entry(ic, section, j);
                r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
                if (unlikely(r)) {
                        dm_integrity_io_error(ic, "crypto_shash_update", r);
                        goto err;
                }
        }

        size = crypto_shash_digestsize(ic->journal_mac);

        if (likely(size <= JOURNAL_MAC_SIZE)) {
                r = crypto_shash_final(desc, result);
                if (unlikely(r)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memset(result + size, 0, JOURNAL_MAC_SIZE - size);
        } else {
                __u8 digest[HASH_MAX_DIGESTSIZE];

                if (WARN_ON(size > sizeof(digest))) {
                        dm_integrity_io_error(ic, "digest_size", -EINVAL);
                        goto err;
                }
                r = crypto_shash_final(desc, digest);
                if (unlikely(r)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memcpy(result, digest, JOURNAL_MAC_SIZE);
        }

        return;
err:
        memset(result, 0, JOURNAL_MAC_SIZE);
}
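
/*
 * The section MAC covers the sector numbers of all entries in the
 * section; rw_section_mac() below stores or verifies it in
 * JOURNAL_MAC_PER_SECTOR-byte slices, one slice per journal sector.
 */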

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
        __u8 result[JOURNAL_MAC_SIZE];
        unsigned j;

        if (!ic->journal_mac)
                return;

        section_mac(ic, section, result);

        for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
                struct journal_sector *js = access_journal(ic, section, j);

                if (likely(wr))
                        memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
                else {
                        if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
                                dm_integrity_io_error(ic, "journal mac", -EILSEQ);
                }
        }
}

static void complete_journal_op(void *context)
{
        struct journal_completion *comp = context;
        BUG_ON(!atomic_read(&comp->in_flight));
        if (likely(atomic_dec_and_test(&comp->in_flight)))
                complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                        unsigned n_sections, struct journal_completion *comp)
{
        struct async_submit_ctl submit;
        size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
        unsigned pl_index, pl_offset, section_index;
        struct page_list *source_pl, *target_pl;

        if (likely(encrypt)) {
                source_pl = ic->journal;
                target_pl = ic->journal_io;
        } else {
                source_pl = ic->journal_io;
                target_pl = ic->journal;
        }

        page_list_location(ic, section, 0, &pl_index, &pl_offset);

        atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

        section_index = pl_index;

        do {
                size_t this_step;
                struct page *src_pages[2];
                struct page *dst_page;

                while (unlikely(pl_index == section_index)) {
                        unsigned dummy;
                        if (likely(encrypt))
                                rw_section_mac(ic, section, true);
                        section++;
                        n_sections--;
                        if (!n_sections)
                                break;
                        page_list_location(ic, section, 0, &section_index, &dummy);
                }

                this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
                dst_page = target_pl[pl_index].page;
                src_pages[0] = source_pl[pl_index].page;
                src_pages[1] = ic->journal_xor[pl_index].page;

                async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

                pl_index++;
                pl_offset = 0;
                n_bytes -= this_step;
        } while (n_bytes);

        BUG_ON(n_sections);

        async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
        struct journal_completion *comp = req->data;
        if (unlikely(err)) {
                if (likely(err == -EINPROGRESS)) {
                        complete(&comp->ic->crypto_backoff);
                        return;
                }
                dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
        }
        complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
        int r;
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      complete_journal_encrypt, comp);
        if (likely(encrypt))
                r = crypto_skcipher_encrypt(req);
        else
                r = crypto_skcipher_decrypt(req);
        if (likely(!r))
                return false;
        if (likely(r == -EINPROGRESS))
                return true;
        if (likely(r == -EBUSY)) {
                wait_for_completion(&comp->ic->crypto_backoff);
                reinit_completion(&comp->ic->crypto_backoff);
                return true;
        }
        dm_integrity_io_error(comp->ic, "encrypt", r);
        return false;
}
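
/*
 * do_crypt() returns true when the request will complete
 * asynchronously (the callback then drops the in_flight reference) and
 * false when it already finished or failed; crypt_journal() below
 * bumps in_flight accordingly.
 */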

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                          unsigned n_sections, struct journal_completion *comp)
{
        struct scatterlist **source_sg;
        struct scatterlist **target_sg;

        atomic_add(2, &comp->in_flight);

        if (likely(encrypt)) {
                source_sg = ic->journal_scatterlist;
                target_sg = ic->journal_io_scatterlist;
        } else {
                source_sg = ic->journal_io_scatterlist;
                target_sg = ic->journal_scatterlist;
        }

        do {
                struct skcipher_request *req;
                unsigned ivsize;
                char *iv;

                if (likely(encrypt))
                        rw_section_mac(ic, section, true);

                req = ic->sk_requests[section];
                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
                iv = req->iv;

                memcpy(iv, iv + ivsize, ivsize);

                req->src = source_sg[section];
                req->dst = target_sg[section];

                if (unlikely(do_crypt(encrypt, req, comp)))
                        atomic_inc(&comp->in_flight);

                section++;
                n_sections--;
        } while (n_sections);

        atomic_dec(&comp->in_flight);
        complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                            unsigned n_sections, struct journal_completion *comp)
{
        if (ic->journal_xor)
                return xor_journal(ic, encrypt, section, n_sections, comp);
        else
                return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
        struct journal_completion *comp = context;
        if (unlikely(error != 0))
                dm_integrity_io_error(comp->ic, "writing journal", -EIO);
        complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
                               unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        unsigned pl_index, pl_offset;
        int r;

        if (unlikely(dm_integrity_failed(ic))) {
                if (comp)
                        complete_journal_io(-1UL, comp);
                return;
        }

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_PAGE_LIST;
        if (ic->journal_io)
                io_req.mem.ptr.pl = &ic->journal_io[pl_index];
        else
                io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        if (likely(comp != NULL)) {
                io_req.notify.fn = complete_journal_io;
                io_req.notify.context = comp;
        } else {
                io_req.notify.fn = NULL;
        }
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start + SB_SECTORS + sector;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
                if (comp) {
                        WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                        complete_journal_io(-1UL, comp);
                }
        }
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
                       unsigned n_sections, struct journal_completion *comp)
{
        unsigned sector, n_sectors;

        sector = section * ic->journal_section_sectors;
        n_sectors = n_sections * ic->journal_section_sectors;

        rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
        struct journal_completion io_comp;
        struct journal_completion crypt_comp_1;
        struct journal_completion crypt_comp_2;
        unsigned i;

        io_comp.ic = ic;
        init_completion(&io_comp.comp);

        if (commit_start + commit_sections <= ic->journal_sections) {
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
                        wait_for_completion_io(&crypt_comp_1.comp);
                } else {
                        for (i = 0; i < commit_sections; i++)
                                rw_section_mac(ic, commit_start + i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
                           commit_sections, &io_comp);
        } else {
                unsigned to_end;
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
                to_end = ic->journal_sections - commit_start;
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
                        if (try_wait_for_completion(&crypt_comp_1.comp)) {
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                reinit_completion(&crypt_comp_1.comp);
                                crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
                                wait_for_completion_io(&crypt_comp_1.comp);
                        } else {
                                crypt_comp_2.ic = ic;
                                init_completion(&crypt_comp_2.comp);
                                crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
                                wait_for_completion_io(&crypt_comp_1.comp);
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                wait_for_completion_io(&crypt_comp_2.comp);
                        }
                } else {
                        for (i = 0; i < to_end; i++)
                                rw_section_mac(ic, commit_start + i, true);
                        rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                        for (i = 0; i < commit_sections - to_end; i++)
                                rw_section_mac(ic, i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
        }

        wait_for_completion_io(&io_comp.comp);
}
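
/*
 * A committed range that wraps past the end of the journal is written
 * as two pieces (io_comp.in_flight starts at 2), and the second
 * encryption pass is overlapped with the first write when possible.
 */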

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                              unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        int r;
        unsigned sector, pl_index, pl_offset;

        BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

        if (unlikely(dm_integrity_failed(ic))) {
                fn(-1UL, data);
                return;
        }

        sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = REQ_OP_WRITE;
        io_req.bi_op_flags = 0;
        io_req.mem.type = DM_IO_PAGE_LIST;
        io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        io_req.notify.fn = fn;
        io_req.notify.context = data;
        io_req.client = ic->io;
        io_loc.bdev = ic->dev->bdev;
        io_loc.sector = target;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                fn(-1UL, data);
        }
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
        return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
               range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
        struct rb_node **n = &ic->in_progress.rb_node;
        struct rb_node *parent;

        BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

        if (likely(check_waiting)) {
                struct dm_integrity_range *range;
                list_for_each_entry(range, &ic->wait_list, wait_entry) {
                        if (unlikely(ranges_overlap(range, new_range)))
                                return false;
                }
        }

        parent = NULL;

        while (*n) {
                struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

                parent = *n;
                if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
                        n = &range->node.rb_left;
                } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
                        n = &range->node.rb_right;
                } else {
                        return false;
                }
        }

        rb_link_node(&new_range->node, parent, n);
        rb_insert_color(&new_range->node, &ic->in_progress);

        return true;
}
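
/*
 * In-progress ranges live in an rb-tree keyed by logical sector;
 * callers hold endio_wait.lock.  A false return means the new range
 * overlaps an in-progress or waiting range and the caller must wait.
 */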

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        rb_erase(&range->node, &ic->in_progress);
        while (unlikely(!list_empty(&ic->wait_list))) {
                struct dm_integrity_range *last_range =
                        list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
                struct task_struct *last_range_task;
                last_range_task = last_range->task;
                list_del(&last_range->wait_entry);
                if (!add_new_range(ic, last_range, false)) {
                        last_range->task = last_range_task;
                        list_add(&last_range->wait_entry, &ic->wait_list);
                        break;
                }
                last_range->waiting = false;
                wake_up_process(last_range_task);
        }
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->endio_wait.lock, flags);
        remove_range_unlocked(ic, range);
        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        new_range->waiting = true;
        list_add_tail(&new_range->wait_entry, &ic->wait_list);
        new_range->task = current;
        do {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&ic->endio_wait.lock);
                io_schedule();
                spin_lock_irq(&ic->endio_wait.lock);
        } while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        if (unlikely(!add_new_range(ic, new_range, true)))
                wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
        RB_CLEAR_NODE(&node->node);
        node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
        struct rb_node **link;
        struct rb_node *parent;

        node->sector = sector;
        BUG_ON(!RB_EMPTY_NODE(&node->node));

        link = &ic->journal_tree_root.rb_node;
        parent = NULL;

        while (*link) {
                struct journal_node *j;
                parent = *link;
                j = container_of(parent, struct journal_node, node);
                if (sector < j->sector)
                        link = &j->node.rb_left;
                else
                        link = &j->node.rb_right;
        }

        rb_link_node(&node->node, parent, link);
        rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        BUG_ON(RB_EMPTY_NODE(&node->node));
        rb_erase(&node->node, &ic->journal_tree_root);
        init_journal_node(node);
}

#define NOT_FOUND       (-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
        struct rb_node *n = ic->journal_tree_root.rb_node;
        unsigned found = NOT_FOUND;
        *next_sector = (sector_t)-1;
        while (n) {
                struct journal_node *j = container_of(n, struct journal_node, node);
                if (sector == j->sector) {
                        found = j - ic->journal_tree;
                }
                if (sector < j->sector) {
                        *next_sector = j->sector;
                        n = j->node.rb_left;
                } else {
                        n = j->node.rb_right;
                }
        }

        return found;
}
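
/*
 * Equal sectors sort to the right in add_journal_node(), so the index
 * returned above is the rightmost -- i.e. most recently added -- match;
 * *next_sector receives the smallest sector strictly greater than the
 * one searched for.
 */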

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
        struct journal_node *node, *next_node;
        struct rb_node *next;

        if (unlikely(pos >= ic->journal_entries))
                return false;
        node = &ic->journal_tree[pos];
        if (unlikely(RB_EMPTY_NODE(&node->node)))
                return false;
        if (unlikely(node->sector != sector))
                return false;

        next = rb_next(&node->node);
        if (unlikely(!next))
                return true;

        next_node = container_of(next, struct journal_node, node);
        return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        struct rb_node *next;
        struct journal_node *next_node;
        unsigned next_section;

        BUG_ON(RB_EMPTY_NODE(&node->node));

        next = rb_next(&node->node);
        if (unlikely(!next))
                return false;

        next_node = container_of(next, struct journal_node, node);

        if (next_node->sector != node->sector)
                return false;

        next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
        if (next_section >= ic->committed_section &&
            next_section < ic->committed_section + ic->n_committed_sections)
                return true;
        if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
                return true;

        return false;
}

#define TAG_READ        0
#define TAG_WRITE       1
#define TAG_CMP         2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
                               unsigned *metadata_offset, unsigned total_size, int op)
{
#define MAY_BE_FILLER           1
#define MAY_BE_HASH             2
        unsigned hash_offset = 0;
        unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

        do {
                unsigned char *data, *dp;
                struct dm_buffer *b;
                unsigned to_copy;
                int r;

                r = dm_integrity_failed(ic);
                if (unlikely(r))
                        return r;

                data = dm_bufio_read(ic->bufio, *metadata_block, &b);
                if (IS_ERR(data))
                        return PTR_ERR(data);

                to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
                dp = data + *metadata_offset;
                if (op == TAG_READ) {
                        memcpy(tag, dp, to_copy);
                } else if (op == TAG_WRITE) {
                        memcpy(dp, tag, to_copy);
                        dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
                } else {
                        /* e.g.: op == TAG_CMP */

                        if (likely(is_power_of_2(ic->tag_size))) {
                                if (unlikely(memcmp(dp, tag, to_copy)))
                                        if (unlikely(!ic->discard) ||
                                            unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
                                                goto thorough_test;
                                }
                        } else {
                                unsigned i, ts;
thorough_test:
                                ts = total_size;

                                for (i = 0; i < to_copy; i++, ts--) {
                                        if (unlikely(dp[i] != tag[i]))
                                                may_be &= ~MAY_BE_HASH;
                                        if (likely(dp[i] != DISCARD_FILLER))
                                                may_be &= ~MAY_BE_FILLER;
                                        hash_offset++;
                                        if (unlikely(hash_offset == ic->tag_size)) {
                                                if (unlikely(!may_be)) {
                                                        dm_bufio_release(b);
                                                        return ts;
                                                }
                                                hash_offset = 0;
                                                may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
                                        }
                                }
                        }
                }
                dm_bufio_release(b);

                tag += to_copy;
                *metadata_offset += to_copy;
                if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
                        (*metadata_block)++;
                        *metadata_offset = 0;
                }

                if (unlikely(!is_power_of_2(ic->tag_size))) {
                        hash_offset = (hash_offset + to_copy) % ic->tag_size;
                }

                total_size -= to_copy;
        } while (unlikely(total_size));

        return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}
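
/*
 * For TAG_CMP the return value is 0 when all tags match (or, with
 * discards enabled, look like DISCARD_FILLER), a negative errno on
 * failure, or the number of tag bytes still unprocessed at the first
 * definite mismatch, which the caller can use to locate the offending
 * block.
 */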

struct flush_request {
        struct dm_io_request io_req;
        struct dm_io_region io_reg;
        struct dm_integrity_c *ic;
        struct completion comp;
};

static void flush_notify(unsigned long error, void *fr_)
{
        struct flush_request *fr = fr_;
        if (unlikely(error != 0))
                dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
        complete(&fr->comp);
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
        int r;

        struct flush_request fr;

        if (!ic->meta_dev)
                flush_data = false;
        if (flush_data) {
                fr.io_req.bi_op = REQ_OP_WRITE,
                fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                fr.io_req.mem.type = DM_IO_KMEM,
                fr.io_req.mem.ptr.addr = NULL,
                fr.io_req.notify.fn = flush_notify,
                fr.io_req.notify.context = &fr;
                fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
                fr.io_reg.bdev = ic->dev->bdev,
                fr.io_reg.sector = 0,
                fr.io_reg.count = 0,
                fr.ic = ic;
                init_completion(&fr.comp);
                r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
                BUG_ON(r);
        }

        r = dm_bufio_write_dirty_buffers(ic->bufio);
        if (unlikely(r))
                dm_integrity_io_error(ic, "writing tags", r);

        if (flush_data)
                wait_for_completion(&fr.comp);
}
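
/*
 * With a separate metadata device the data device's write cache must
 * be flushed explicitly -- the zero-length REQ_PREFLUSH request above --
 * while dm_bufio_write_dirty_buffers() writes out the dirty metadata
 * buffers.
 */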
1438
1439static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1440{
1441        DECLARE_WAITQUEUE(wait, current);
1442        __add_wait_queue(&ic->endio_wait, &wait);
1443        __set_current_state(TASK_UNINTERRUPTIBLE);
1444        spin_unlock_irq(&ic->endio_wait.lock);
1445        io_schedule();
1446        spin_lock_irq(&ic->endio_wait.lock);
1447        __remove_wait_queue(&ic->endio_wait, &wait);
1448}
1449
1450static void autocommit_fn(struct timer_list *t)
1451{
1452        struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1453
1454        if (likely(!dm_integrity_failed(ic)))
1455                queue_work(ic->commit_wq, &ic->commit_work);
1456}
1457
1458static void schedule_autocommit(struct dm_integrity_c *ic)
1459{
1460        if (!timer_pending(&ic->autocommit_timer))
1461                mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1462}
1463
1464static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1465{
1466        struct bio *bio;
1467        unsigned long flags;
1468
1469        spin_lock_irqsave(&ic->endio_wait.lock, flags);
1470        bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1471        bio_list_add(&ic->flush_bio_list, bio);
1472        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1473
1474        queue_work(ic->commit_wq, &ic->commit_work);
1475}
1476
1477static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1478{
1479        int r = dm_integrity_failed(ic);
1480        if (unlikely(r) && !bio->bi_status)
1481                bio->bi_status = errno_to_blk_status(r);
1482        if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1483                unsigned long flags;
1484                spin_lock_irqsave(&ic->endio_wait.lock, flags);
1485                bio_list_add(&ic->synchronous_bios, bio);
1486                queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1487                spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1488                return;
1489        }
1490        bio_endio(bio);
1491}
1492
1493static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1494{
1495        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1496
1497        if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1498                submit_flush_bio(ic, dio);
1499        else
1500                do_endio(ic, bio);
1501}
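    /*
     * This is the FUA emulation path: dm_integrity_map() strips REQ_FUA from
     * the bio (see below), so a FUA write that completed successfully is
     * finished by routing it through submit_flush_bio() to
     * integrity_commit(), which commits the journal and/or flushes the
     * caches before the bio is ended.
     */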
1502
1503static void dec_in_flight(struct dm_integrity_io *dio)
1504{
1505        if (atomic_dec_and_test(&dio->in_flight)) {
1506                struct dm_integrity_c *ic = dio->ic;
1507                struct bio *bio;
1508
1509                remove_range(ic, &dio->range);
1510
1511                if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
1512                        schedule_autocommit(ic);
1513
1514                bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1515
1516                if (unlikely(dio->bi_status) && !bio->bi_status)
1517                        bio->bi_status = dio->bi_status;
1518                if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1519                        dio->range.logical_sector += dio->range.n_sectors;
1520                        bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1521                        INIT_WORK(&dio->work, integrity_bio_wait);
1522                        queue_work(ic->offload_wq, &dio->work);
1523                        return;
1524                }
1525                do_endio_flush(ic, dio);
1526        }
1527}
1528
1529static void integrity_end_io(struct bio *bio)
1530{
1531        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1532
1533        dm_bio_restore(&dio->bio_details, bio);
1534        if (bio->bi_integrity)
1535                bio->bi_opf |= REQ_INTEGRITY;
1536
1537        if (dio->completion)
1538                complete(dio->completion);
1539
1540        dec_in_flight(dio);
1541}
1542
1543static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1544                                      const char *data, char *result)
1545{
1546        __u64 sector_le = cpu_to_le64(sector);
1547        SHASH_DESC_ON_STACK(req, ic->internal_hash);
1548        int r;
1549        unsigned digest_size;
1550
1551        req->tfm = ic->internal_hash;
1552
1553        r = crypto_shash_init(req);
1554        if (unlikely(r < 0)) {
1555                dm_integrity_io_error(ic, "crypto_shash_init", r);
1556                goto failed;
1557        }
1558
1559        r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1560        if (unlikely(r < 0)) {
1561                dm_integrity_io_error(ic, "crypto_shash_update", r);
1562                goto failed;
1563        }
1564
1565        r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1566        if (unlikely(r < 0)) {
1567                dm_integrity_io_error(ic, "crypto_shash_update", r);
1568                goto failed;
1569        }
1570
1571        r = crypto_shash_final(req, result);
1572        if (unlikely(r < 0)) {
1573                dm_integrity_io_error(ic, "crypto_shash_final", r);
1574                goto failed;
1575        }
1576
1577        digest_size = crypto_shash_digestsize(ic->internal_hash);
1578        if (unlikely(digest_size < ic->tag_size))
1579                memset(result + digest_size, 0, ic->tag_size - digest_size);
1580
1581        return;
1582
1583failed:
1584        /* this shouldn't happen anyway, the hash functions have no reason to fail */
1585        get_random_bytes(result, ic->tag_size);
1586}
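    /*
     * An illustrative tag computation, assuming internal_hash is crc32c with
     * a 4-byte digest and tag_size == 4: for an 8-sector block at logical
     * sector 8 the stored tag is
     *
     *         crc32c(le64(8) || data[0..4095])
     *
     * If tag_size exceeded the digest size, the tail would be zero-filled by
     * the memset above. On a (never expected) hash API failure the tag is
     * randomized, so a later read of the block will almost certainly report
     * a mismatch instead of silently passing.
     */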
1587
1588static void integrity_metadata(struct work_struct *w)
1589{
1590        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1591        struct dm_integrity_c *ic = dio->ic;
1592
1593        int r;
1594
1595        if (ic->internal_hash) {
1596                struct bvec_iter iter;
1597                struct bio_vec bv;
1598                unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1599                struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1600                char *checksums;
1601                unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1602                char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1603                sector_t sector;
1604                unsigned sectors_to_process;
1605
1606                if (unlikely(ic->mode == 'R'))
1607                        goto skip_io;
1608
1609                if (likely(dio->op != REQ_OP_DISCARD))
1610                        checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1611                                            GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1612                else
1613                        checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1614                if (!checksums) {
1615                        checksums = checksums_onstack;
1616                        if (WARN_ON(extra_space &&
1617                                    digest_size > sizeof(checksums_onstack))) {
1618                                r = -EINVAL;
1619                                goto error;
1620                        }
1621                }
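                    /*
                     * Buffer sizing example, assuming 4KiB pages, 512-byte
                     * sectors and log2_sectors_per_block == 0: the
                     * non-discard allocation above holds (4096 >> 9) == 8
                     * checksums of tag_size bytes each (one page worth of
                     * blocks), plus extra_space when the digest is longer
                     * than the stored tag. The on-stack fallback holds a
                     * single checksum, so the checksum loop further below
                     * then processes one block per dm_integrity_rw_tag()
                     * call.
                     */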
1622
1623                if (unlikely(dio->op == REQ_OP_DISCARD)) {
1624                        sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
1625                        unsigned bi_size = dio->bio_details.bi_iter.bi_size;
1626                        unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1627                        unsigned max_blocks = max_size / ic->tag_size;
1628                        memset(checksums, DISCARD_FILLER, max_size);
1629
1630                        while (bi_size) {
1631                                unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1632                                this_step_blocks = min(this_step_blocks, max_blocks);
1633                                r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1634                                                        this_step_blocks * ic->tag_size, TAG_WRITE);
1635                                if (unlikely(r)) {
1636                                        if (likely(checksums != checksums_onstack))
1637                                                kfree(checksums);
1638                                        goto error;
1639                                }
1640
1646                                bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1647                                bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1648                        }
1649
1650                        if (likely(checksums != checksums_onstack))
1651                                kfree(checksums);
1652                        goto skip_io;
1653                }
1654
1655                sector = dio->range.logical_sector;
1656                sectors_to_process = dio->range.n_sectors;
1657
1658                __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1659                        unsigned pos;
1660                        char *mem, *checksums_ptr;
1661
1662again:
1663                        mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
1664                        pos = 0;
1665                        checksums_ptr = checksums;
1666                        do {
1667                                integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1668                                checksums_ptr += ic->tag_size;
1669                                sectors_to_process -= ic->sectors_per_block;
1670                                pos += ic->sectors_per_block << SECTOR_SHIFT;
1671                                sector += ic->sectors_per_block;
1672                        } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1673                        kunmap_atomic(mem);
1674
1675                        r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1676                                                checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1677                        if (unlikely(r)) {
1678                                if (r > 0) {
1679                                        char b[BDEVNAME_SIZE];
1680                                        DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
1681                                                    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1682                                        r = -EILSEQ;
1683                                        atomic64_inc(&ic->number_of_mismatches);
1684                                }
1685                                if (likely(checksums != checksums_onstack))
1686                                        kfree(checksums);
1687                                goto error;
1688                        }
1689
1690                        if (!sectors_to_process)
1691                                break;
1692
1693                        if (unlikely(pos < bv.bv_len)) {
1694                                bv.bv_offset += pos;
1695                                bv.bv_len -= pos;
1696                                goto again;
1697                        }
1698                }
1699
1700                if (likely(checksums != checksums_onstack))
1701                        kfree(checksums);
1702        } else {
1703                struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1704
1705                if (bip) {
1706                        struct bio_vec biv;
1707                        struct bvec_iter iter;
1708                        unsigned data_to_process = dio->range.n_sectors;
1709                        sector_to_block(ic, data_to_process);
1710                        data_to_process *= ic->tag_size;
1711
1712                        bip_for_each_vec(biv, bip, iter) {
1713                                unsigned char *tag;
1714                                unsigned this_len;
1715
1716                                BUG_ON(PageHighMem(biv.bv_page));
1717                                tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1718                                this_len = min(biv.bv_len, data_to_process);
1719                                r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1720                                                        this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1721                                if (unlikely(r))
1722                                        goto error;
1723                                data_to_process -= this_len;
1724                                if (!data_to_process)
1725                                        break;
1726                        }
1727                }
1728        }
1729skip_io:
1730        dec_in_flight(dio);
1731        return;
1732error:
1733        dio->bi_status = errno_to_blk_status(r);
1734        dec_in_flight(dio);
1735}
1736
1737static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1738{
1739        struct dm_integrity_c *ic = ti->private;
1740        struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1741        struct bio_integrity_payload *bip;
1742
1743        sector_t area, offset;
1744
1745        dio->ic = ic;
1746        dio->bi_status = 0;
1747        dio->op = bio_op(bio);
1748
1749        if (unlikely(dio->op == REQ_OP_DISCARD)) {
1750                if (ti->max_io_len) {
1751                        sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1752                        unsigned log2_max_io_len = __fls(ti->max_io_len);
1753                        sector_t start_boundary = sec >> log2_max_io_len;
1754                        sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1755                        if (start_boundary < end_boundary) {
1756                                sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1757                                dm_accept_partial_bio(bio, len);
1758                        }
1759                }
1760        }
1761
1762        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1763                submit_flush_bio(ic, dio);
1764                return DM_MAPIO_SUBMITTED;
1765        }
1766
1767        dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1768        dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1769        if (unlikely(dio->fua)) {
1770                /*
1771                 * Don't pass down the FUA flag because we have to flush
1772                 * disk cache anyway.
1773                 */
1774                bio->bi_opf &= ~REQ_FUA;
1775        }
1776        if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1777                DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1778                      dio->range.logical_sector, bio_sectors(bio),
1779                      ic->provided_data_sectors);
1780                return DM_MAPIO_KILL;
1781        }
1782        if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1783                DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1784                      ic->sectors_per_block,
1785                      dio->range.logical_sector, bio_sectors(bio));
1786                return DM_MAPIO_KILL;
1787        }
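            /*
             * The OR in the test above checks both values with one mask: for
             * ic->sectors_per_block == 8 the mask is 7, and
             * (logical_sector | bio_sectors(bio)) & 7 is nonzero iff either
             * the start or the length is misaligned.
             */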
1788
1789        if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1790                struct bvec_iter iter;
1791                struct bio_vec bv;
1792                bio_for_each_segment(bv, bio, iter) {
1793                        if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1794                                DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1795                                        bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1796                                return DM_MAPIO_KILL;
1797                        }
1798                }
1799        }
1800
1801        bip = bio_integrity(bio);
1802        if (!ic->internal_hash) {
1803                if (bip) {
1804                        unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1805                        if (ic->log2_tag_size >= 0)
1806                                wanted_tag_size <<= ic->log2_tag_size;
1807                        else
1808                                wanted_tag_size *= ic->tag_size;
1809                        if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1810                                DMERR("Invalid integrity data size %u, expected %u",
1811                                      bip->bip_iter.bi_size, wanted_tag_size);
1812                                return DM_MAPIO_KILL;
1813                        }
1814                }
1815        } else {
1816                if (unlikely(bip != NULL)) {
1817                        DMERR("Unexpected integrity data when using internal hash");
1818                        return DM_MAPIO_KILL;
1819                }
1820        }
1821
1822        if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1823                return DM_MAPIO_KILL;
1824
1825        get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1826        dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1827        bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1828
1829        dm_integrity_map_continue(dio, true);
1830        return DM_MAPIO_SUBMITTED;
1831}
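    /*
     * A mapping example for the interleaved layout, assuming the default
     * log2_interleave_sectors of 15 (32768 sectors per area): logical sector
     * 100000 yields
     *
     *         area = 100000 >> 15 == 3, offset = 100000 & 32767 == 1696
     *
     * and get_metadata_sector_and_offset() then locates the tag for that
     * block within area 3's metadata. With a separate metadata device there
     * is a single area and the offset equals the logical sector.
     */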
1832
1833static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1834                                 unsigned journal_section, unsigned journal_entry)
1835{
1836        struct dm_integrity_c *ic = dio->ic;
1837        sector_t logical_sector;
1838        unsigned n_sectors;
1839
1840        logical_sector = dio->range.logical_sector;
1841        n_sectors = dio->range.n_sectors;
1842        do {
1843                struct bio_vec bv = bio_iovec(bio);
1844                char *mem;
1845
1846                if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1847                        bv.bv_len = n_sectors << SECTOR_SHIFT;
1848                n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1849                bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1850retry_kmap:
1851                mem = kmap_atomic(bv.bv_page);
1852                if (likely(dio->op == REQ_OP_WRITE))
1853                        flush_dcache_page(bv.bv_page);
1854
1855                do {
1856                        struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1857
1858                        if (unlikely(dio->op == REQ_OP_READ)) {
1859                                struct journal_sector *js;
1860                                char *mem_ptr;
1861                                unsigned s;
1862
1863                                if (unlikely(journal_entry_is_inprogress(je))) {
1864                                        flush_dcache_page(bv.bv_page);
1865                                        kunmap_atomic(mem);
1866
1867                                        __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1868                                        goto retry_kmap;
1869                                }
1870                                smp_rmb();
1871                                BUG_ON(journal_entry_get_sector(je) != logical_sector);
1872                                js = access_journal_data(ic, journal_section, journal_entry);
1873                                mem_ptr = mem + bv.bv_offset;
1874                                s = 0;
1875                                do {
1876                                        memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1877                                        *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1878                                        js++;
1879                                        mem_ptr += 1 << SECTOR_SHIFT;
1880                                } while (++s < ic->sectors_per_block);
1881#ifdef INTERNAL_VERIFY
1882                                if (ic->internal_hash) {
1883                                        char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1884
1885                                        integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1886                                        if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1887                                                DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1888                                                            logical_sector);
1889                                        }
1890                                }
1891#endif
1892                        }
1893
1894                        if (!ic->internal_hash) {
1895                                struct bio_integrity_payload *bip = bio_integrity(bio);
1896                                unsigned tag_todo = ic->tag_size;
1897                                char *tag_ptr = journal_entry_tag(ic, je);
1898
1899                                if (bip) do {
1900                                        struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1901                                        unsigned tag_now = min(biv.bv_len, tag_todo);
1902                                        char *tag_addr;
1903                                        BUG_ON(PageHighMem(biv.bv_page));
1904                                        tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1905                                        if (likely(dio->op == REQ_OP_WRITE))
1906                                                memcpy(tag_ptr, tag_addr, tag_now);
1907                                        else
1908                                                memcpy(tag_addr, tag_ptr, tag_now);
1909                                        bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
1910                                        tag_ptr += tag_now;
1911                                        tag_todo -= tag_now;
1912                                } while (unlikely(tag_todo)); else {
1913                                        if (likely(dio->op == REQ_OP_WRITE))
1914                                                memset(tag_ptr, 0, tag_todo);
1915                                }
1916                        }
1917
1918                        if (likely(dio->op == REQ_OP_WRITE)) {
1919                                struct journal_sector *js;
1920                                unsigned s;
1921
1922                                js = access_journal_data(ic, journal_section, journal_entry);
1923                                memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1924
1925                                s = 0;
1926                                do {
1927                                        je->last_bytes[s] = js[s].commit_id;
1928                                } while (++s < ic->sectors_per_block);
1929
1930                                if (ic->internal_hash) {
1931                                        unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1932                                        if (unlikely(digest_size > ic->tag_size)) {
1933                                                char checksums_onstack[HASH_MAX_DIGESTSIZE];
1934                                                integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1935                                                memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1936                                        } else
1937                                                integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1938                                }
1939
1940                                journal_entry_set_sector(je, logical_sector);
1941                        }
1942                        logical_sector += ic->sectors_per_block;
1943
1944                        journal_entry++;
1945                        if (unlikely(journal_entry == ic->journal_section_entries)) {
1946                                journal_entry = 0;
1947                                journal_section++;
1948                                wraparound_section(ic, &journal_section);
1949                        }
1950
1951                        bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1952                } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1953
1954                if (unlikely(dio->op == REQ_OP_READ))
1955                        flush_dcache_page(bv.bv_page);
1956                kunmap_atomic(mem);
1957        } while (n_sectors);
1958
1959        if (likely(dio->op == REQ_OP_WRITE)) {
1960                smp_mb();
1961                if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1962                        wake_up(&ic->copy_to_journal_wait);
1963                if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1964                        queue_work(ic->commit_wq, &ic->commit_work);
1965                } else {
1966                        schedule_autocommit(ic);
1967                }
1968        } else {
1969                remove_range(ic, &dio->range);
1970        }
1971
1972        if (unlikely(bio->bi_iter.bi_size)) {
1973                sector_t area, offset;
1974
1975                dio->range.logical_sector = logical_sector;
1976                get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1977                dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1978                return true;
1979        }
1980
1981        return false;
1982}
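    /*
     * Journal data layout, in brief: each 512-byte journal sector gives up
     * its last 8 bytes to a commit_id, and the journal entry preserves the
     * displaced data bytes in je->last_bytes[] (one commit_id_t per data
     * sector of the block). The write path above copies the block into the
     * journal, saves js[s].commit_id into je->last_bytes[s], computes the
     * tag and only then publishes the entry with journal_entry_set_sector();
     * its write barrier pairs with the smp_rmb() in the read path after
     * journal_entry_is_inprogress() clears.
     */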
1983
1984static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1985{
1986        struct dm_integrity_c *ic = dio->ic;
1987        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1988        unsigned journal_section, journal_entry;
1989        unsigned journal_read_pos;
1990        struct completion read_comp;
1991        bool discard_retried = false;
1992        bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
1993        if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
1994                need_sync_io = true;
1995
1996        if (need_sync_io && from_map) {
1997                INIT_WORK(&dio->work, integrity_bio_wait);
1998                queue_work(ic->offload_wq, &dio->work);
1999                return;
2000        }
2001
2002lock_retry:
2003        spin_lock_irq(&ic->endio_wait.lock);
2004retry:
2005        if (unlikely(dm_integrity_failed(ic))) {
2006                spin_unlock_irq(&ic->endio_wait.lock);
2007                do_endio(ic, bio);
2008                return;
2009        }
2010        dio->range.n_sectors = bio_sectors(bio);
2011        journal_read_pos = NOT_FOUND;
2012        if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2013                if (dio->op == REQ_OP_WRITE) {
2014                        unsigned next_entry, i, pos;
2015                        unsigned ws, we, range_sectors;
2016
2017                        dio->range.n_sectors = min(dio->range.n_sectors,
2018                                                   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2019                        if (unlikely(!dio->range.n_sectors)) {
2020                                if (from_map)
2021                                        goto offload_to_thread;
2022                                sleep_on_endio_wait(ic);
2023                                goto retry;
2024                        }
2025                        range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2026                        ic->free_sectors -= range_sectors;
2027                        journal_section = ic->free_section;
2028                        journal_entry = ic->free_section_entry;
2029
2030                        next_entry = ic->free_section_entry + range_sectors;
2031                        ic->free_section_entry = next_entry % ic->journal_section_entries;
2032                        ic->free_section += next_entry / ic->journal_section_entries;
2033                        ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2034                        wraparound_section(ic, &ic->free_section);
2035
2036                        pos = journal_section * ic->journal_section_entries + journal_entry;
2037                        ws = journal_section;
2038                        we = journal_entry;
2039                        i = 0;
2040                        do {
2041                                struct journal_entry *je;
2042
2043                                add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2044                                pos++;
2045                                if (unlikely(pos >= ic->journal_entries))
2046                                        pos = 0;
2047
2048                                je = access_journal_entry(ic, ws, we);
2049                                BUG_ON(!journal_entry_is_unused(je));
2050                                journal_entry_set_inprogress(je);
2051                                we++;
2052                                if (unlikely(we == ic->journal_section_entries)) {
2053                                        we = 0;
2054                                        ws++;
2055                                        wraparound_section(ic, &ws);
2056                                }
2057                        } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2058
2059                        spin_unlock_irq(&ic->endio_wait.lock);
2060                        goto journal_read_write;
2061                } else {
2062                        sector_t next_sector;
2063                        journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2064                        if (likely(journal_read_pos == NOT_FOUND)) {
2065                                if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2066                                        dio->range.n_sectors = next_sector - dio->range.logical_sector;
2067                        } else {
2068                                unsigned i;
2069                                unsigned jp = journal_read_pos + 1;
2070                                for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2071                                        if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2072                                                break;
2073                                }
2074                                dio->range.n_sectors = i;
2075                        }
2076                }
2077        }
2078        if (unlikely(!add_new_range(ic, &dio->range, true))) {
2079                /*
2080                 * We must not sleep in the request routine because it could
2081                 * stall bios on current->bio_list.
2082                 * So, we offload the bio to a workqueue if we have to sleep.
2083                 */
2084                if (from_map) {
2085offload_to_thread:
2086                        spin_unlock_irq(&ic->endio_wait.lock);
2087                        INIT_WORK(&dio->work, integrity_bio_wait);
2088                        queue_work(ic->wait_wq, &dio->work);
2089                        return;
2090                }
2091                if (journal_read_pos != NOT_FOUND)
2092                        dio->range.n_sectors = ic->sectors_per_block;
2093                wait_and_add_new_range(ic, &dio->range);
2094                /*
2095                 * wait_and_add_new_range drops the spinlock, so the journal
2096                 * may have been changed arbitrarily. We need to recheck.
2097                 * To simplify the code, we restrict I/O size to just one block.
2098                 */
2099                if (journal_read_pos != NOT_FOUND) {
2100                        sector_t next_sector;
2101                        unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2102                        if (unlikely(new_pos != journal_read_pos)) {
2103                                remove_range_unlocked(ic, &dio->range);
2104                                goto retry;
2105                        }
2106                }
2107        }
2108        if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2109                sector_t next_sector;
2110                unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2111                if (unlikely(new_pos != NOT_FOUND) ||
2112                    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2113                        remove_range_unlocked(ic, &dio->range);
2114                        spin_unlock_irq(&ic->endio_wait.lock);
2115                        queue_work(ic->commit_wq, &ic->commit_work);
2116                        flush_workqueue(ic->commit_wq);
2117                        queue_work(ic->writer_wq, &ic->writer_work);
2118                        flush_workqueue(ic->writer_wq);
2119                        discard_retried = true;
2120                        goto lock_retry;
2121                }
2122        }
2123        spin_unlock_irq(&ic->endio_wait.lock);
2124
2125        if (unlikely(journal_read_pos != NOT_FOUND)) {
2126                journal_section = journal_read_pos / ic->journal_section_entries;
2127                journal_entry = journal_read_pos % ic->journal_section_entries;
2128                goto journal_read_write;
2129        }
2130
2131        if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2132                if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2133                                     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2134                        struct bitmap_block_status *bbs;
2135
2136                        bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2137                        spin_lock(&bbs->bio_queue_lock);
2138                        bio_list_add(&bbs->bio_queue, bio);
2139                        spin_unlock(&bbs->bio_queue_lock);
2140                        queue_work(ic->writer_wq, &bbs->work);
2141                        return;
2142                }
2143        }
2144
2145        dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2146
2147        if (need_sync_io) {
2148                init_completion(&read_comp);
2149                dio->completion = &read_comp;
2150        } else
2151                dio->completion = NULL;
2152
2153        dm_bio_record(&dio->bio_details, bio);
2154        bio_set_dev(bio, ic->dev->bdev);
2155        bio->bi_integrity = NULL;
2156        bio->bi_opf &= ~REQ_INTEGRITY;
2157        bio->bi_end_io = integrity_end_io;
2158        bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2159
2160        if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2161                integrity_metadata(&dio->work);
2162                dm_integrity_flush_buffers(ic, false);
2163
2164                dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2165                dio->completion = NULL;
2166
2167                submit_bio_noacct(bio);
2168
2169                return;
2170        }
2171
2172        submit_bio_noacct(bio);
2173
2174        if (need_sync_io) {
2175                wait_for_completion_io(&read_comp);
2176                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2177                    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2178                        goto skip_check;
2179                if (ic->mode == 'B') {
2180                        if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2181                                             dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2182                                goto skip_check;
2183                }
2184
2185                if (likely(!bio->bi_status))
2186                        integrity_metadata(&dio->work);
2187                else
2188skip_check:
2189                        dec_in_flight(dio);
2190
2191        } else {
2192                INIT_WORK(&dio->work, integrity_metadata);
2193                queue_work(ic->metadata_wq, &dio->work);
2194        }
2195
2196        return;
2197
2198journal_read_write:
2199        if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2200                goto lock_retry;
2201
2202        do_endio_flush(ic, dio);
2203}
2204
2205
2206static void integrity_bio_wait(struct work_struct *w)
2207{
2208        struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2209
2210        dm_integrity_map_continue(dio, false);
2211}
2212
2213static void pad_uncommitted(struct dm_integrity_c *ic)
2214{
2215        if (ic->free_section_entry) {
2216                ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2217                ic->free_section_entry = 0;
2218                ic->free_section++;
2219                wraparound_section(ic, &ic->free_section);
2220                ic->n_uncommitted_sections++;
2221        }
2222        if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2223                    (ic->n_uncommitted_sections + ic->n_committed_sections) *
2224                    ic->journal_section_entries + ic->free_sectors)) {
2225                DMCRIT("journal_sections %u, journal_section_entries %u, "
2226                       "n_uncommitted_sections %u, n_committed_sections %u, "
2227                       "free_sectors %u",
2228                       ic->journal_sections, ic->journal_section_entries,
2229                       ic->n_uncommitted_sections, ic->n_committed_sections,
2230                       ic->free_sectors);
2231        }
2232}
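    /*
     * The WARN_ON above asserts the journal accounting invariant: every
     * journal entry is either in a section awaiting commit or writeback, or
     * free, i.e.
     *
     *         journal_sections * journal_section_entries ==
     *                 (n_uncommitted_sections + n_committed_sections) *
     *                 journal_section_entries + free_sectors
     *
     * (free_sectors counts journal entries, i.e. blocks, despite its name).
     * Padding consumes the unused tail of a partially filled section so that
     * a commit always covers whole sections.
     */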
2233
2234static void integrity_commit(struct work_struct *w)
2235{
2236        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2237        unsigned commit_start, commit_sections;
2238        unsigned i, j, n;
2239        struct bio *flushes;
2240
2241        del_timer(&ic->autocommit_timer);
2242
2243        spin_lock_irq(&ic->endio_wait.lock);
2244        flushes = bio_list_get(&ic->flush_bio_list);
2245        if (unlikely(ic->mode != 'J')) {
2246                spin_unlock_irq(&ic->endio_wait.lock);
2247                dm_integrity_flush_buffers(ic, true);
2248                goto release_flush_bios;
2249        }
2250
2251        pad_uncommitted(ic);
2252        commit_start = ic->uncommitted_section;
2253        commit_sections = ic->n_uncommitted_sections;
2254        spin_unlock_irq(&ic->endio_wait.lock);
2255
2256        if (!commit_sections)
2257                goto release_flush_bios;
2258
2259        i = commit_start;
2260        for (n = 0; n < commit_sections; n++) {
2261                for (j = 0; j < ic->journal_section_entries; j++) {
2262                        struct journal_entry *je;
2263                        je = access_journal_entry(ic, i, j);
2264                        io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2265                }
2266                for (j = 0; j < ic->journal_section_sectors; j++) {
2267                        struct journal_sector *js;
2268                        js = access_journal(ic, i, j);
2269                        js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2270                }
2271                i++;
2272                if (unlikely(i >= ic->journal_sections))
2273                        ic->commit_seq = next_commit_seq(ic->commit_seq);
2274                wraparound_section(ic, &i);
2275        }
2276        smp_rmb();
2277
2278        write_journal(ic, commit_start, commit_sections);
2279
2280        spin_lock_irq(&ic->endio_wait.lock);
2281        ic->uncommitted_section += commit_sections;
2282        wraparound_section(ic, &ic->uncommitted_section);
2283        ic->n_uncommitted_sections -= commit_sections;
2284        ic->n_committed_sections += commit_sections;
2285        spin_unlock_irq(&ic->endio_wait.lock);
2286
2287        if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2288                queue_work(ic->writer_wq, &ic->writer_work);
2289
2290release_flush_bios:
2291        while (flushes) {
2292                struct bio *next = flushes->bi_next;
2293                flushes->bi_next = NULL;
2294                do_endio(ic, flushes);
2295                flushes = next;
2296        }
2297}
2298
2299static void complete_copy_from_journal(unsigned long error, void *context)
2300{
2301        struct journal_io *io = context;
2302        struct journal_completion *comp = io->comp;
2303        struct dm_integrity_c *ic = comp->ic;
2304        remove_range(ic, &io->range);
2305        mempool_free(io, &ic->journal_io_mempool);
2306        if (unlikely(error != 0))
2307                dm_integrity_io_error(ic, "copying from journal", -EIO);
2308        complete_journal_op(comp);
2309}
2310
2311static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2312                               struct journal_entry *je)
2313{
2314        unsigned s = 0;
2315        do {
2316                js->commit_id = je->last_bytes[s];
2317                js++;
2318        } while (++s < ic->sectors_per_block);
2319}
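    /*
     * restore_last_bytes() is the inverse of the save in
     * __journal_read_write(): before a block is copied out of the journal,
     * the commit_id stamped into each journal sector's tail is overwritten
     * with the original data bytes preserved in je->last_bytes[],
     * reconstructing the block byte for byte.
     */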
2320
2321static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2322                             unsigned write_sections, bool from_replay)
2323{
2324        unsigned i, j, n;
2325        struct journal_completion comp;
2326        struct blk_plug plug;
2327
2328        blk_start_plug(&plug);
2329
2330        comp.ic = ic;
2331        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2332        init_completion(&comp.comp);
2333
2334        i = write_start;
2335        for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2336#ifndef INTERNAL_VERIFY
2337                if (unlikely(from_replay))
2338#endif
2339                        rw_section_mac(ic, i, false);
2340                for (j = 0; j < ic->journal_section_entries; j++) {
2341                        struct journal_entry *je = access_journal_entry(ic, i, j);
2342                        sector_t sec, area, offset;
2343                        unsigned k, l, next_loop;
2344                        sector_t metadata_block;
2345                        unsigned metadata_offset;
2346                        struct journal_io *io;
2347
2348                        if (journal_entry_is_unused(je))
2349                                continue;
2350                        BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2351                        sec = journal_entry_get_sector(je);
2352                        if (unlikely(from_replay)) {
2353                                if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2354                                        dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2355                                        sec &= ~(sector_t)(ic->sectors_per_block - 1);
2356                                }
2357                        }
2358                        if (unlikely(sec >= ic->provided_data_sectors))
2359                                continue;
2360                        get_area_and_offset(ic, sec, &area, &offset);
2361                        restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2362                        for (k = j + 1; k < ic->journal_section_entries; k++) {
2363                                struct journal_entry *je2 = access_journal_entry(ic, i, k);
2364                                sector_t sec2, area2, offset2;
2365                                if (journal_entry_is_unused(je2))
2366                                        break;
2367                                BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2368                                sec2 = journal_entry_get_sector(je2);
2369                                if (unlikely(sec2 >= ic->provided_data_sectors))
2370                                        break;
2371                                get_area_and_offset(ic, sec2, &area2, &offset2);
2372                                if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2373                                        break;
2374                                restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2375                        }
2376                        next_loop = k - 1;
2377
2378                        io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2379                        io->comp = &comp;
2380                        io->range.logical_sector = sec;
2381                        io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2382
2383                        spin_lock_irq(&ic->endio_wait.lock);
2384                        add_new_range_and_wait(ic, &io->range);
2385
2386                        if (likely(!from_replay)) {
2387                                struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2388
2389                                /* don't write if there is a newer committed sector */
2390                                while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2391                                        struct journal_entry *je2 = access_journal_entry(ic, i, j);
2392
2393                                        journal_entry_set_unused(je2);
2394                                        remove_journal_node(ic, &section_node[j]);
2395                                        j++;
2396                                        sec += ic->sectors_per_block;
2397                                        offset += ic->sectors_per_block;
2398                                }
2399                                while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2400                                        struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2401
2402                                        journal_entry_set_unused(je2);
2403                                        remove_journal_node(ic, &section_node[k - 1]);
2404                                        k--;
2405                                }
2406                                if (j == k) {
2407                                        remove_range_unlocked(ic, &io->range);
2408                                        spin_unlock_irq(&ic->endio_wait.lock);
2409                                        mempool_free(io, &ic->journal_io_mempool);
2410                                        goto skip_io;
2411                                }
2412                                for (l = j; l < k; l++) {
2413                                        remove_journal_node(ic, &section_node[l]);
2414                                }
2415                        }
2416                        spin_unlock_irq(&ic->endio_wait.lock);
2417
2418                        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2419                        for (l = j; l < k; l++) {
2420                                int r;
2421                                struct journal_entry *je2 = access_journal_entry(ic, i, l);
2422
2423                                if (
2424#ifndef INTERNAL_VERIFY
2425                                    unlikely(from_replay) &&
2426#endif
2427                                    ic->internal_hash) {
2428                                        char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2429
2430                                        integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2431                                                                  (char *)access_journal_data(ic, i, l), test_tag);
2432                                        if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2433                                                dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2434                                }
2435
2436                                journal_entry_set_unused(je2);
2437                                r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2438                                                        ic->tag_size, TAG_WRITE);
2439                                if (unlikely(r)) {
2440                                        dm_integrity_io_error(ic, "reading tags", r);
2441                                }
2442                        }
2443
2444                        atomic_inc(&comp.in_flight);
2445                        copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2446                                          (k - j) << ic->sb->log2_sectors_per_block,
2447                                          get_data_sector(ic, area, offset),
2448                                          complete_copy_from_journal, io);
2449skip_io:
2450                        j = next_loop;
2451                }
2452        }
2453
2454        dm_bufio_write_dirty_buffers_async(ic->bufio);
2455
2456        blk_finish_plug(&plug);
2457
2458        complete_journal_op(&comp);
2459        wait_for_completion_io(&comp.comp);
2460
2461        dm_integrity_flush_buffers(ic, true);
2462}
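    /*
     * Writeback batching, in brief: the k-loop above greedily extends each
     * journal entry into the longest run of entries whose destinations are
     * physically contiguous within one area, trims entries superseded by a
     * newer committed copy (find_newer_committed_node(), skipped during
     * replay), writes the run's tags and then issues a single
     * copy_from_journal() per run. comp.in_flight counts the outstanding
     * copies, and the final dm_integrity_flush_buffers() makes data and tags
     * durable before integrity_writer() recycles the journal space.
     */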
2463
2464static void integrity_writer(struct work_struct *w)
2465{
2466        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2467        unsigned write_start, write_sections;
2468
2469        unsigned prev_free_sectors;
2470
2471        /* this test is not strictly needed, but skipping writeback here exercises the journal replay code on the next activation */
2472        if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
2473                return;
2474
2475        spin_lock_irq(&ic->endio_wait.lock);
2476        write_start = ic->committed_section;
2477        write_sections = ic->n_committed_sections;
2478        spin_unlock_irq(&ic->endio_wait.lock);
2479
2480        if (!write_sections)
2481                return;
2482
2483        do_journal_write(ic, write_start, write_sections, false);
2484
2485        spin_lock_irq(&ic->endio_wait.lock);
2486
2487        ic->committed_section += write_sections;
2488        wraparound_section(ic, &ic->committed_section);
2489        ic->n_committed_sections -= write_sections;
2490
2491        prev_free_sectors = ic->free_sectors;
2492        ic->free_sectors += write_sections * ic->journal_section_entries;
2493        if (unlikely(!prev_free_sectors))
2494                wake_up_locked(&ic->endio_wait);
2495
2496        spin_unlock_irq(&ic->endio_wait.lock);
2497}
2498
2499static void recalc_write_super(struct dm_integrity_c *ic)
2500{
2501        int r;
2502
2503        dm_integrity_flush_buffers(ic, false);
2504        if (dm_integrity_failed(ic))
2505                return;
2506
2507        r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2508        if (unlikely(r))
2509                dm_integrity_io_error(ic, "writing superblock", r);
2510}
2511
2512static void integrity_recalc(struct work_struct *w)
2513{
2514        struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2515        struct dm_integrity_range range;
2516        struct dm_io_request io_req;
2517        struct dm_io_region io_loc;
2518        sector_t area, offset;
2519        sector_t metadata_block;
2520        unsigned metadata_offset;
2521        sector_t logical_sector, n_sectors;
2522        __u8 *t;
2523        unsigned i;
2524        int r;
2525        unsigned super_counter = 0;
2526
2527        DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2528
2529        spin_lock_irq(&ic->endio_wait.lock);
2530
2531next_chunk:
2532
2533        if (unlikely(dm_post_suspending(ic->ti)))
2534                goto unlock_ret;
2535
2536        range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2537        if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2538                if (ic->mode == 'B') {
2539                        block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2540                        DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2541                        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2542                }
2543                goto unlock_ret;
2544        }
2545
2546        get_area_and_offset(ic, range.logical_sector, &area, &offset);
2547        range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2548        if (!ic->meta_dev)
2549                range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2550
2551        add_new_range_and_wait(ic, &range);
2552        spin_unlock_irq(&ic->endio_wait.lock);
2553        logical_sector = range.logical_sector;
2554        n_sectors = range.n_sectors;
2555
2556        if (ic->mode == 'B') {
2557                if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2558                        goto advance_and_next;
2559                }
2560                while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2561                                       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2562                        logical_sector += ic->sectors_per_block;
2563                        n_sectors -= ic->sectors_per_block;
2564                        cond_resched();
2565                }
2566                while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2567                                       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2568                        n_sectors -= ic->sectors_per_block;
2569                        cond_resched();
2570                }
2571                get_area_and_offset(ic, logical_sector, &area, &offset);
2572        }
2573
2574        DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2575
2576        if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2577                recalc_write_super(ic);
2578                if (ic->mode == 'B') {
2579                        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2580                }
2581                super_counter = 0;
2582        }
2583
2584        if (unlikely(dm_integrity_failed(ic)))
2585                goto err;
2586
2587        io_req.bi_op = REQ_OP_READ;
2588        io_req.bi_op_flags = 0;
2589        io_req.mem.type = DM_IO_VMA;
2590        io_req.mem.ptr.addr = ic->recalc_buffer;
2591        io_req.notify.fn = NULL;
2592        io_req.client = ic->io;
2593        io_loc.bdev = ic->dev->bdev;
2594        io_loc.sector = get_data_sector(ic, area, offset);
2595        io_loc.count = n_sectors;
2596
2597        r = dm_io(&io_req, 1, &io_loc, NULL);
2598        if (unlikely(r)) {
2599                dm_integrity_io_error(ic, "reading data", r);
2600                goto err;
2601        }
2602
2603        t = ic->recalc_tags;
2604        for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2605                integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2606                t += ic->tag_size;
2607        }
2608
2609        metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2610
2611        r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2612        if (unlikely(r)) {
2613                dm_integrity_io_error(ic, "writing tags", r);
2614                goto err;
2615        }
2616
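            /*
             * The tags have been written (through the buffer cache);
             * clear the corresponding recalc_bitmap bits, rounding both
             * bounds down to whole bitmap bits.
             */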
2617        if (ic->mode == 'B') {
2618                sector_t start, end;
2619                start = (range.logical_sector >>
2620                         (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2621                        (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2622                end = ((range.logical_sector + range.n_sectors) >>
2623                       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2624                        (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2625                block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2626        }
2627
2628advance_and_next:
2629        cond_resched();
2630
2631        spin_lock_irq(&ic->endio_wait.lock);
2632        remove_range_unlocked(ic, &range);
2633        ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2634        goto next_chunk;
2635
2636err:
2637        remove_range(ic, &range);
2638        return;
2639
2640unlock_ret:
2641        spin_unlock_irq(&ic->endio_wait.lock);
2642
2643        recalc_write_super(ic);
2644}
2645
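    /*
     * Work item for one on-disk bitmap block: bios whose range is already
     * marked in may_write_bitmap may proceed immediately; for the others,
     * set the bits in the in-memory bitmap (ic->journal), write the bitmap
     * block out with FUA and only then mark may_write_bitmap and release
     * the bios.
     */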
2646static void bitmap_block_work(struct work_struct *w)
2647{
2648        struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2649        struct dm_integrity_c *ic = bbs->ic;
2650        struct bio *bio;
2651        struct bio_list bio_queue;
2652        struct bio_list waiting;
2653
2654        bio_list_init(&waiting);
2655
2656        spin_lock(&bbs->bio_queue_lock);
2657        bio_queue = bbs->bio_queue;
2658        bio_list_init(&bbs->bio_queue);
2659        spin_unlock(&bbs->bio_queue_lock);
2660
2661        while ((bio = bio_list_pop(&bio_queue))) {
2662                struct dm_integrity_io *dio;
2663
2664                dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2665
2666                if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2667                                    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2668                        remove_range(ic, &dio->range);
2669                        INIT_WORK(&dio->work, integrity_bio_wait);
2670                        queue_work(ic->offload_wq, &dio->work);
2671                } else {
2672                        block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2673                                        dio->range.n_sectors, BITMAP_OP_SET);
2674                        bio_list_add(&waiting, bio);
2675                }
2676        }
2677
2678        if (bio_list_empty(&waiting))
2679                return;
2680
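            /* flush this bitmap block to disk before letting the waiting writes proceed */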
2681        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2682                           bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2683                           BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2684
2685        while ((bio = bio_list_pop(&waiting))) {
2686                struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2687
2688                block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2689                                dio->range.n_sectors, BITMAP_OP_SET);
2690
2691                remove_range(ic, &dio->range);
2692                INIT_WORK(&dio->work, integrity_bio_wait);
2693                queue_work(ic->offload_wq, &dio->work);
2694        }
2695
2696        queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2697}
2698
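    /*
     * Periodic (or suspend-time) bitmap flush: flush dirty buffers, block
     * all I/O by claiming the whole data range, clear the in-memory
     * bitmaps (only up to the recalculation position while recalculation
     * is in progress), write them back with FUA and finish any bios that
     * were waiting for synchronous completion.
     */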
2699static void bitmap_flush_work(struct work_struct *work)
2700{
2701        struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2702        struct dm_integrity_range range;
2703        unsigned long limit;
2704        struct bio *bio;
2705
2706        dm_integrity_flush_buffers(ic, false);
2707
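            /*
             * Claim the entire data range so that no I/O is in flight
             * while the bitmaps are cleared and written back.
             */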
2708        range.logical_sector = 0;
2709        range.n_sectors = ic->provided_data_sectors;
2710
2711        spin_lock_irq(&ic->endio_wait.lock);
2712        add_new_range_and_wait(ic, &range);
2713        spin_unlock_irq(&ic->endio_wait.lock);
2714
2715        dm_integrity_flush_buffers(ic, true);
2716
2717        limit = ic->provided_data_sectors;
2718        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2719                limit = le64_to_cpu(ic->sb->recalc_sector)
2720                        >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2721                        << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2722        }
2723        /*DEBUG_print("zeroing journal\n");*/
2724        block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2725        block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2726
2727        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2728                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2729
2730        spin_lock_irq(&ic->endio_wait.lock);
2731        remove_range_unlocked(ic, &range);
2732        while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2733                bio_endio(bio);
2734                spin_unlock_irq(&ic->endio_wait.lock);
2735                spin_lock_irq(&ic->endio_wait.lock);
2736        }
2737        spin_unlock_irq(&ic->endio_wait.lock);
2738}
2739
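    /*
     * (Re)initialize n_sections journal sections starting at start_section:
     * zero each sector's payload, stamp it with the commit id for
     * commit_seq, mark every journal entry unused and write the sections
     * out.
     */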
2741static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2742                         unsigned n_sections, unsigned char commit_seq)
2743{
2744        unsigned i, j, n;
2745
2746        if (!n_sections)
2747                return;
2748
2749        for (n = 0; n < n_sections; n++) {
2750                i = start_section + n;
2751                wraparound_section(ic, &i);
2752                for (j = 0; j < ic->journal_section_sectors; j++) {
2753                        struct journal_sector *js = access_journal(ic, i, j);
2754                        memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2755                        js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2756                }
2757                for (j = 0; j < ic->journal_section_entries; j++) {
2758                        struct journal_entry *je = access_journal_entry(ic, i, j);
2759                        journal_entry_set_unused(je);
2760                }
2761        }
2762
2763        write_journal(ic, start_section, n_sections);
2764}
2765
2766static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2767{
2768        unsigned char k;
2769        for (k = 0; k < N_COMMIT_IDS; k++) {
2770                if (dm_integrity_commit_id(ic, i, j, k) == id)
2771                        return k;
2772        }
2773        dm_integrity_io_error(ic, "journal commit id", -EIO);
2774        return -EIO;
2775}
2776
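    /*
     * Replay the journal after an unclean shutdown: read (and decrypt) it,
     * determine the newest commit sequence from the per-sector commit ids,
     * write back the sections that were fully committed and either
     * continue journalling from that position or erase the journal.
     */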
2777static void replay_journal(struct dm_integrity_c *ic)
2778{
2779        unsigned i, j;
2780        bool used_commit_ids[N_COMMIT_IDS];
2781        unsigned max_commit_id_sections[N_COMMIT_IDS];
2782        unsigned write_start, write_sections;
2783        unsigned continue_section;
2784        bool journal_empty;
2785        unsigned char unused, last_used, want_commit_seq;
2786
2787        if (ic->mode == 'R')
2788                return;
2789
2790        if (ic->journal_uptodate)
2791                return;
2792
2793        last_used = 0;
2794        write_start = 0;
2795
2796        if (!ic->just_formatted) {
2797                DEBUG_print("reading journal\n");
2798                rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2799                if (ic->journal_io)
2800                        DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2801                if (ic->journal_io) {
2802                        struct journal_completion crypt_comp;
2803                        crypt_comp.ic = ic;
2804                        init_completion(&crypt_comp.comp);
2805                        crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2806                        encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2807                        wait_for_completion(&crypt_comp.comp);
2808                }
2809                DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2810        }
2811
2812        if (dm_integrity_failed(ic))
2813                goto clear_journal;
2814
2815        journal_empty = true;
2816        memset(used_commit_ids, 0, sizeof used_commit_ids);
2817        memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2818        for (i = 0; i < ic->journal_sections; i++) {
2819                for (j = 0; j < ic->journal_section_sectors; j++) {
2820                        int k;
2821                        struct journal_sector *js = access_journal(ic, i, j);
2822                        k = find_commit_seq(ic, i, j, js->commit_id);
2823                        if (k < 0)
2824                                goto clear_journal;
2825                        used_commit_ids[k] = true;
2826                        max_commit_id_sections[k] = i;
2827                }
2828                if (journal_empty) {
2829                        for (j = 0; j < ic->journal_section_entries; j++) {
2830                                struct journal_entry *je = access_journal_entry(ic, i, j);
2831                                if (!journal_entry_is_unused(je)) {
2832                                        journal_empty = false;
2833                                        break;
2834                                }
2835                        }
2836                }
2837        }
2838
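            /*
             * Find the first unused commit id; the id preceding it
             * (last_used) belongs to the most recently written sections.
             */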
2839        if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2840                unused = N_COMMIT_IDS - 1;
2841                while (unused && !used_commit_ids[unused - 1])
2842                        unused--;
2843        } else {
2844                for (unused = 0; unused < N_COMMIT_IDS; unused++)
2845                        if (!used_commit_ids[unused])
2846                                break;
2847                if (unused == N_COMMIT_IDS) {
2848                        dm_integrity_io_error(ic, "journal commit ids", -EIO);
2849                        goto clear_journal;
2850                }
2851        }
2852        DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2853                    unused, used_commit_ids[0], used_commit_ids[1],
2854                    used_commit_ids[2], used_commit_ids[3]);
2855
2856        last_used = prev_commit_seq(unused);
2857        want_commit_seq = prev_commit_seq(last_used);
2858
2859        if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2860                journal_empty = true;
2861
2862        write_start = max_commit_id_sections[last_used] + 1;
2863        if (unlikely(write_start >= ic->journal_sections))
2864                want_commit_seq = next_commit_seq(want_commit_seq);
2865        wraparound_section(ic, &write_start);
2866
2867        i = write_start;
2868        for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2869                for (j = 0; j < ic->journal_section_sectors; j++) {
2870                        struct journal_sector *js = access_journal(ic, i, j);
2871
2872                        if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2873                                /*
2874                                 * This could be caused by a crash during writing.
2875                                 * We won't replay the inconsistent part of the
2876                                 * journal.
2877                                 */
2878                                DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2879                                            i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2880                                goto brk;
2881                        }
2882                }
2883                i++;
2884                if (unlikely(i >= ic->journal_sections))
2885                        want_commit_seq = next_commit_seq(want_commit_seq);
2886                wraparound_section(ic, &i);
2887        }
2888brk:
2889
2890        if (!journal_empty) {
2891                DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2892                            write_sections, write_start, want_commit_seq);
2893                do_journal_write(ic, write_start, write_sections, true);
2894        }
2895
2896        if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2897                continue_section = write_start;
2898                ic->commit_seq = want_commit_seq;
2899                DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2900        } else {
2901                unsigned s;
2902                unsigned char erase_seq;
2903clear_journal:
2904                DEBUG_print("clearing journal\n");
2905
2906                erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2907                s = write_start;
2908                init_journal(ic, s, 1, erase_seq);
2909                s++;
2910                wraparound_section(ic, &s);
2911                if (ic->journal_sections >= 2) {
2912                        init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2913                        s += ic->journal_sections - 2;
2914                        wraparound_section(ic, &s);
2915                        init_journal(ic, s, 1, erase_seq);
2916                }
2917
2918                continue_section = 0;
2919                ic->commit_seq = next_commit_seq(erase_seq);
2920        }
2921
2922        ic->committed_section = continue_section;
2923        ic->n_committed_sections = 0;
2924
2925        ic->uncommitted_section = continue_section;
2926        ic->n_uncommitted_sections = 0;
2927
2928        ic->free_section = continue_section;
2929        ic->free_section_entry = 0;
2930        ic->free_sectors = ic->journal_entries;
2931
2932        ic->journal_tree_root = RB_ROOT;
2933        for (i = 0; i < ic->journal_entries; i++)
2934                init_journal_node(&ic->journal_tree[i]);
2935}
2936
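    /*
     * Called on reboot/shutdown: in bitmap mode, switch to a very short
     * flush interval and flush the bitmap immediately, so that little or
     * no recalculation is needed after the reboot.
     */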
2937static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
2938{
2939        DEBUG_print("dm_integrity_enter_synchronous_mode\n");
2940
2941        if (ic->mode == 'B') {
2942                ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2943                ic->synchronous_mode = 1;
2944
2945                cancel_delayed_work_sync(&ic->bitmap_flush_work);
2946                queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2947                flush_workqueue(ic->commit_wq);
2948        }
2949}
2950
2951static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
2952{
2953        struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2954
2955        DEBUG_print("dm_integrity_reboot\n");
2956
2957        dm_integrity_enter_synchronous_mode(ic);
2958
2959        return NOTIFY_DONE;
2960}
2961
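    /*
     * Quiesce the device: stop the autocommit timer, drain recalculation
     * and commit work, flush the journal writer (mode 'J') or write out a
     * clean bitmap and superblock (mode 'B'), then mark the in-core
     * journal up to date.
     */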
2962static void dm_integrity_postsuspend(struct dm_target *ti)
2963{
2964        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2965        int r;
2966
2967        WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
2968
2969        del_timer_sync(&ic->autocommit_timer);
2970
2971        if (ic->recalc_wq)
2972                drain_workqueue(ic->recalc_wq);
2973
2974        if (ic->mode == 'B')
2975                cancel_delayed_work_sync(&ic->bitmap_flush_work);
2976
2977        queue_work(ic->commit_wq, &ic->commit_work);
2978        drain_workqueue(ic->commit_wq);
2979
2980        if (ic->mode == 'J') {
2981                if (ic->meta_dev)
2982                        queue_work(ic->writer_wq, &ic->writer_work);
2983                drain_workqueue(ic->writer_wq);
2984                dm_integrity_flush_buffers(ic, true);
2985        }
2986
2987        if (ic->mode == 'B') {
2988                dm_integrity_flush_buffers(ic, true);
2989#if 1
2990                /* set to 0 to test bitmap replay code */
2991                init_journal(ic, 0, ic->journal_sections, 0);
2992                ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2993                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2994                if (unlikely(r))
2995                        dm_integrity_io_error(ic, "writing superblock", r);
2996#endif
2997        }
2998
2999        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3000
3001        ic->journal_uptodate = true;
3002}
3003
3004static void dm_integrity_resume(struct dm_target *ti)
3005{
3006        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3007        __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3008        int r;
3009
3010        DEBUG_print("resume\n");
3011
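            /*
             * The target was resized.  If it grew in bitmap mode (and the
             * bitmap granularity is unchanged), mark the newly added
             * sectors in the on-disk bitmap, then persist the new size in
             * the superblock.
             */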
3012        if (ic->provided_data_sectors != old_provided_data_sectors) {
3013                if (ic->provided_data_sectors > old_provided_data_sectors &&
3014                    ic->mode == 'B' &&
3015                    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3016                        rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3017                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3018                        block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3019                                        ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3020                        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3021                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3022                }
3023
3024                ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3025                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3026                if (unlikely(r))
3027                        dm_integrity_io_error(ic, "writing superblock", r);
3028        }
3029
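            /*
             * The previous activation was in bitmap mode and did not shut
             * down cleanly.  Load the on-disk bitmap and treat the regions
             * it marks as needing recalculation; if the bitmap granularity
             * changed, recalculate everything.  If the mode is no longer
             * 'B', schedule a full recalculation unless the bitmap is
             * clean, and reinitialize the journal.
             */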
3030        if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3031                DEBUG_print("resume dirty_bitmap\n");
3032                rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3033                                   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3034                if (ic->mode == 'B') {
3035                        if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3036                                block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3037                                block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3038                                if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3039                                                     BITMAP_OP_TEST_ALL_CLEAR)) {
3040                                        ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3041                                        ic->sb->recalc_sector = cpu_to_le64(0);
3042                                }
3043                        } else {
3044                                DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3045                                            ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3046                                ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3047                                block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3048                                block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3049                                block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3050                                rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3051                                                   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3052                                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3053                                ic->sb->recalc_sector = cpu_to_le64(0);
3054                        }
3055                } else {
3056                        if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3057                              block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
3058                                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3059                                ic->sb->recalc_sector = cpu_to_le64(0);
3060                        }
3061                        init_journal(ic, 0, ic->journal_sections, 0);
3062                        replay_journal(ic);
3063                        ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3064                }
3065                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3066                if (unlikely(r))
3067                        dm_integrity_io_error(ic, "writing superblock", r);
3068        } else {
3069                replay_journal(ic);
3070                if (ic->mode == 'B') {
3071                        ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3072                        ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3073                        r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3074                        if (unlikely(r))
3075                                dm_integrity_io_error(ic, "writing superblock", r);
3076
3077                        block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3078                        block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3079                        block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3080                        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3081                            le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3082                                block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3083                                                ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3084                                block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3085                                                ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3086                                block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3087                                                ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3088                        }
3089                        rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3090                                           ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3091                }
3092        }
3093
3094        DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3095        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3096                __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3097                DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3098                if (recalc_pos < ic->provided_data_sectors) {
3099                        queue_work(ic->recalc_wq, &ic->recalc_work);
3100                } else if (recalc_pos > ic->provided_data_sectors) {
3101                        ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3102                        recalc_write_super(ic);
3103                }
3104        }
3105
3106        ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3107        ic->reboot_notifier.next = NULL;
3108        ic->reboot_notifier.priority = INT_MAX - 1;     /* be notified after md and before hardware drivers */
3109        WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3110
3111#if 0
3112        /* set to 1 to stress test synchronous mode */
3113        dm_integrity_enter_synchronous_mode(ic);
3114#endif
3115}
3116
3117static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3118                                unsigned status_flags, char *result, unsigned maxlen)
3119{
3120        struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3121        unsigned arg_count;
3122        size_t sz = 0;
3123
3124        switch (type) {
3125        case STATUSTYPE_INFO:
3126                DMEMIT("%llu %llu",
3127                        (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3128                        ic->provided_data_sectors);
3129                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3130                        DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3131                else
3132                        DMEMIT(" -");
3133                break;
3134
3135        case STATUSTYPE_TABLE: {
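                    /*
                     * Reconstruct the journal_watermark percentage from the
                     * free-sectors threshold, rounding to nearest: e.g. 100
                     * journal entries with a threshold of 50 yield
                     * (50 * 100 + 50) / 100 = 50%.
                     */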
3136                __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3137                watermark_percentage += ic->journal_entries / 2;
3138                do_div(watermark_percentage, ic->journal_entries);
3139                arg_count = 3;
3140                arg_count += !!ic->meta_dev;
3141                arg_count += ic->sectors_per_block != 1;
3142                arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3143                arg_count += ic->discard;
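                    /*
                     * mode 'J' adds journal_watermark and commit_time;
                     * mode 'B' adds sectors_per_bit and bitmap_flush_interval
                     */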
3144                arg_count += ic->mode == 'J';
3145                arg_count += ic->mode == 'J';
3146                arg_count += ic->mode == 'B';
3147                arg_count += ic->mode == 'B';
3148                arg_count += !!ic->internal_hash_alg.alg_string;
3149                arg_count += !!ic->journal_crypt_alg.alg_string;
3150                arg_count += !!ic->journal_mac_alg.alg_string;
3151                arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3152                arg_count += ic->legacy_recalculate;
3153                DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3154                       ic->tag_size, ic->mode, arg_count);
3155                if (ic->meta_dev)
3156                        DMEMIT(" meta_device:%s", ic->meta_dev->name);
3157                if (ic->sectors_per_block != 1)
3158                        DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3159                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3160                        DMEMIT(" recalculate");
3161                if (ic->discard)
3162                        DMEMIT(" allow_discards");
3163                DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3164                DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3165                DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3166                if (ic->mode == 'J') {
3167                        DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3168                        DMEMIT(" commit_time:%u", ic->autocommit_msec);
3169                }
3170                if (ic->mode == 'B') {
3171                        DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3172                        DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3173                }
3174                if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3175                        DMEMIT(" fix_padding");
3176                if (ic->legacy_recalculate)
3177                        DMEMIT(" legacy_recalculate");
3178
3179#define EMIT_ALG(a, n)                                                  \
3180                do {                                                    \
3181                        if (ic->a.alg_string) {                         \
3182                                DMEMIT(" %s:%s", n, ic->a.alg_string);  \
3183                                if (ic->a.key_string)                   \
3184                                        DMEMIT(":%s", ic->a.key_string);\
3185                        }                                               \
3186                } while (0)
3187                EMIT_ALG(internal_hash_alg, "internal_hash");
3188                EMIT_ALG(journal_crypt_alg, "journal_crypt");
3189                EMIT_ALG(journal_mac_alg, "journal_mac");
3190                break;
3191        }
3192        }
3193}
3194
3195static int dm_integrity_iterate_devices(struct dm_target *ti,
3196                                        iterate_devices_callout_fn fn, void *data)
3197{
3198        struct dm_integrity_c *ic = ti->private;
3199
3200        if (!ic->meta_dev)
3201                return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3202        else
3203                return fn(ti, ic->dev, 0, ti->len, data);
3204}
3205
3206static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3207{
3208        struct dm_integrity_c *ic = ti->private;
3209
3210        if (ic->sectors_per_block > 1) {
3211                limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3212                limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3213                blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3214        }
3215}
3216
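    /*
     * Derive the journal geometry from the superblock.  A journal entry
     * holds the target sector, the last bytes of each data sector in the
     * block and the integrity tag, rounded up to JOURNAL_ENTRY_ROUNDUP.
     *
     * A worked example, assuming JOURNAL_SECTOR_DATA is 504 and
     * JOURNAL_BLOCK_SECTORS is 8 (as defined earlier in this file), with
     * tag_size = 4, sectors_per_block = 1 and no journal MAC:
     * journal_entry_size = roundup(8 + 8 + 4, 8) = 24, giving
     * 504 / 24 = 21 entries per sector, 21 * 8 = 168 entries per section
     * and (168 << 0) + 8 = 176 sectors per section.
     */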
3217static void calculate_journal_section_size(struct dm_integrity_c *ic)
3218{
3219        unsigned sector_space = JOURNAL_SECTOR_DATA;
3220
3221        ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3222        ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3223                                         JOURNAL_ENTRY_ROUNDUP);
3224
3225        if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3226                sector_space -= JOURNAL_MAC_PER_SECTOR;
3227        ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3228        ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3229        ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3230        ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3231}
3232
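    /*
     * Compute initial_sectors (superblock plus journal) and the metadata
     * layout, and verify that everything fits on the metadata device.  In
     * interleaved mode, metadata_run is the number of metadata sectors
     * preceding each data area; with a separate metadata device the tag
     * area is simply sized for provided_data_sectors.
     */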
3233static int calculate_device_limits(struct dm_integrity_c *ic)
3234{
3235        __u64 initial_sectors;
3236
3237        calculate_journal_section_size(ic);
3238        initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3239        if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3240                return -EINVAL;
3241        ic->initial_sectors = initial_sectors;
3242
3243        if (!ic->meta_dev) {
3244                sector_t last_sector, last_area, last_offset;
3245
3246                /* we have to maintain excessive padding for compatibility with existing volumes */
3247                __u64 metadata_run_padding =
3248                        ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3249                        (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3250                        (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3251
3252                ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3253                                            metadata_run_padding) >> SECTOR_SHIFT;
3254                if (!(ic->metadata_run & (ic->metadata_run - 1)))
3255                        ic->log2_metadata_run = __ffs(ic->metadata_run);
3256                else
3257                        ic->log2_metadata_run = -1;
3258
3259                get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3260                last_sector = get_data_sector(ic, last_area, last_offset);
3261                if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3262                        return -EINVAL;
3263        } else {
3264                __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3265                meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3266                                >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3267                meta_size <<= ic->log2_buffer_sectors;
3268                if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3269                    ic->initial_sectors + meta_size > ic->meta_device_sectors)
3270                        return -EINVAL;
3271                ic->metadata_run = 1;
3272                ic->log2_metadata_run = 0;
3273        }
3274
3275        return 0;
3276}
3277
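    /*
     * Without a separate metadata device, find the largest
     * provided_data_sectors that still passes calculate_device_limits():
     * try each bit from the highest down and keep it only if the layout
     * still fits.  With a metadata device, use the whole data device
     * rounded down to a block boundary.
     */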
3278static void get_provided_data_sectors(struct dm_integrity_c *ic)
3279{
3280        if (!ic->meta_dev) {
3281                int test_bit;
3282                ic->provided_data_sectors = 0;
3283                for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3284                        __u64 prev_data_sectors = ic->provided_data_sectors;
3285
3286                        ic->provided_data_sectors |= (sector_t)1 << test_bit;
3287                        if (calculate_device_limits(ic))
3288                                ic->provided_data_sectors = prev_data_sectors;
3289                }
3290        } else {
3291                ic->provided_data_sectors = ic->data_device_sectors;
3292                ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3293        }
3294}
3295
3296static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3297{
3298        unsigned journal_sections;
3299        int test_bit;
3300
3301        memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3302        memcpy(ic->sb->magic, SB_MAGIC, 8);
3303        ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3304        ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3305        if (ic->journal_mac_alg.alg_string)
3306                ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3307
3308        calculate_journal_section_size(ic);
3309        journal_sections = journal_sectors / ic->journal_section_sectors;
3310        if (!journal_sections)
3311                journal_sections = 1;
3312
3313        if (!ic->meta_dev) {
3314                if (ic->fix_padding)
3315                        ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3316                ic->sb->journal_sections = cpu_to_le32(journal_sections);
3317                if (!interleave_sectors)
3318                        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3319                ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3320                ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3321                ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3322
3323                get_provided_data_sectors(ic);
3324                if (!ic->provided_data_sectors)
3325                        return -EINVAL;
3326        } else {
3327                ic->sb->log2_interleave_sectors = 0;
3328
3329                get_provided_data_sectors(ic);
3330                if (!ic->provided_data_sectors)
3331                        return -EINVAL;
3332
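                    /*
                     * Choose the largest journal size, up to the requested
                     * number of sections, for which the metadata still
                     * fits; if not even one section fits, retry with a
                     * smaller buffer size.
                     */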
3333try_smaller_buffer:
3334                ic->sb->journal_sections = cpu_to_le32(0);
3335                for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3336                        __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3337                        __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3338                        if (test_journal_sections > journal_sections)
3339                                continue;
3340                        ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3341                        if (calculate_device_limits(ic))
3342                                ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3343
3344                }
3345                if (!le32_to_cpu(ic->sb->journal_sections)) {
3346                        if (ic->log2_buffer_sectors > 3) {
3347                                ic->log2_buffer_sectors--;
3348                                goto try_smaller_buffer;
3349                        }
3350                        return -EINVAL;
3351                }
3352        }
3353
3354        ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3355
3356        sb_set_version(ic);
3357
3358        return 0;
3359}
3360
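    /*
     * Register an integrity profile for the dm device so that the block
     * layer knows it carries tag_size bytes of integrity metadata per
     * data block.
     */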
3361static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3362{
3363        struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3364        struct blk_integrity bi;
3365
3366        memset(&bi, 0, sizeof(bi));
3367        bi.profile = &dm_integrity_profile;
3368        bi.tuple_size = ic->tag_size;
3369        bi.tag_size = bi.tuple_size;
3370        bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3371
3372        blk_integrity_register(disk, &bi);
3373        blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3374}
3375
3376static void dm_integrity_free_page_list(struct page_list *pl)
3377{
3378        unsigned i;
3379
3380        if (!pl)
3381                return;
3382        for (i = 0; pl[i].page; i++)
3383                __free_page(pl[i].page);
3384        kvfree(pl);
3385}
3386
3387static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3388{
3389        struct page_list *pl;
3390        unsigned i;
3391
3392        pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3393        if (!pl)
3394                return NULL;
3395
3396        for (i = 0; i < n_pages; i++) {
3397                pl[i].page = alloc_page(GFP_KERNEL);
3398                if (!pl[i].page) {
3399                        dm_integrity_free_page_list(pl);
3400                        return NULL;
3401                }
3402                if (i)
3403                        pl[i - 1].next = &pl[i];
3404        }
3405        pl[i].page = NULL;
3406        pl[i].next = NULL;
3407
3408        return pl;
3409}
3410
3411static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3412{
3413        unsigned i;
3414        for (i = 0; i < ic->journal_sections; i++)
3415                kvfree(sl[i]);
3416        kvfree(sl);
3417}
3418
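    /*
     * Build one scatterlist per journal section, mapping the section's
     * sectors onto the journal page list; a section may begin and end in
     * the middle of a page.
     */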
3419static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3420                                                                   struct page_list *pl)
3421{
3422        struct scatterlist **sl;
3423        unsigned i;
3424
3425        sl = kvmalloc_array(ic->journal_sections,
3426                            sizeof(struct scatterlist *),
3427                            GFP_KERNEL | __GFP_ZERO);
3428        if (!sl)
3429                return NULL;
3430
3431        for (i = 0; i < ic->journal_sections; i++) {
3432                struct scatterlist *s;
3433                unsigned start_index, start_offset;
3434                unsigned end_index, end_offset;
3435                unsigned n_pages;
3436                unsigned idx;
3437
3438                page_list_location(ic, i, 0, &start_index, &start_offset);
3439                page_list_location(ic, i, ic->journal_section_sectors - 1,
3440                                   &end_index, &end_offset);
3441
3442                n_pages = (end_index - start_index + 1);
3443
3444                s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3445                                   GFP_KERNEL);
3446                if (!s) {
3447                        dm_integrity_free_journal_scatterlist(ic, sl);
3448                        return NULL;
3449                }
3450
3451                sg_init_table(s, n_pages);
3452                for (idx = start_index; idx <= end_index; idx++) {
3453                        char *va = lowmem_page_address(pl[idx].page);
3454                        unsigned start = 0, end = PAGE_SIZE;
3455                        if (idx == start_index)
3456                                start = start_offset;
3457                        if (idx == end_index)
3458                                end = end_offset + (1 << SECTOR_SHIFT);
3459                        sg_set_buf(&s[idx - start_index], va + start, end - start);
3460                }
3461
3462                sl[i] = s;
3463        }
3464
3465        return sl;
3466}
3467
3468static void free_alg(struct alg_spec *a)
3469{
3470        kfree_sensitive(a->alg_string);
3471        kfree_sensitive(a->key);
3472        memset(a, 0, sizeof *a);
3473}
3474
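    /*
     * Parse an "option:algorithm[:hexkey]" argument: the text after the
     * first ':' is the algorithm name; an optional second ':' introduces
     * a hex-encoded key.
     */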
3475static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3476{
3477        char *k;
3478
3479        free_alg(a);
3480
3481        a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3482        if (!a->alg_string)
3483                goto nomem;
3484
3485        k = strchr(a->alg_string, ':');
3486        if (k) {
3487                *k = 0;
3488                a->key_string = k + 1;
3489                if (strlen(a->key_string) & 1)
3490                        goto inval;
3491
3492                a->key_size = strlen(a->key_string) / 2;
3493                a->key = kmalloc(a->key_size, GFP_KERNEL);
3494                if (!a->key)
3495                        goto nomem;
3496                if (hex2bin(a->key, a->key_string, a->key_size))
3497                        goto inval;
3498        }
3499
3500        return 0;
3501inval:
3502        *error = error_inval;
3503        return -EINVAL;
3504nomem:
3505        *error = "Out of memory for an argument";
3506        return -ENOMEM;
3507}
3508
3509static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3510                   char *error_alg, char *error_key)
3511{
3512        int r;
3513
3514        if (a->alg_string) {
3515                *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3516                if (IS_ERR(*hash)) {
3517                        *error = error_alg;
3518                        r = PTR_ERR(*hash);
3519                        *hash = NULL;
3520                        return r;
3521                }
3522
3523                if (a->key) {
3524                        r = crypto_shash_setkey(*hash, a->key, a->key_size);
3525                        if (r) {
3526                                *error = error_key;
3527                                return r;
3528                        }
3529                } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3530                        *error = error_key;
3531                        return -ENOKEY;
3532                }
3533        }
3534
3535        return 0;
3536}
3537
3538static int create_journal(struct dm_integrity_c *ic, char **error)
3539{
3540        int r = 0;
3541        unsigned i;
3542        __u64 journal_pages, journal_desc_size, journal_tree_size;
3543        unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3544        struct skcipher_request *req = NULL;
3545
3546        ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3547        ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3548        ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3549        ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3550
3551        journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3552                                PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3553        journal_desc_size = journal_pages * sizeof(struct page_list);
3554        if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3555                *error = "Journal doesn't fit into memory";
3556                r = -ENOMEM;
3557                goto bad;
3558        }
3559        ic->journal_pages = journal_pages;
3560
3561        ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3562        if (!ic->journal) {
3563                *error = "Could not allocate memory for journal";
3564                r = -ENOMEM;
3565                goto bad;
3566        }
3567        if (ic->journal_crypt_alg.alg_string) {
3568                unsigned ivsize, blocksize;
3569                struct journal_completion comp;
3570
3571                comp.ic = ic;
3572                ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3573                if (IS_ERR(ic->journal_crypt)) {
3574                        *error = "Invalid journal cipher";
3575                        r = PTR_ERR(ic->journal_crypt);
3576                        ic->journal_crypt = NULL;
3577                        goto bad;
3578                }
3579                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3580                blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3581
3582                if (ic->journal_crypt_alg.key) {
3583                        r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3584                                                   ic->journal_crypt_alg.key_size);
3585                        if (r) {
3586                                *error = "Error setting encryption key";
3587                                goto bad;
3588                        }
3589                }
3590                DEBUG_print("cipher %s, block size %u iv size %u\n",
3591                            ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3592
3593                ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3594                if (!ic->journal_io) {
3595                        *error = "Could not allocate memory for journal io";
3596                        r = -ENOMEM;
3597                        goto bad;
3598                }
3599
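                    /*
                     * A cipher with block size 1 (e.g. a stream cipher such
                     * as ctr(aes)) lets us precompute the keystream once:
                     * encrypt all-zero pages in place and later XOR the
                     * journal with the resulting journal_xor pages.
                     */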
3600                if (blocksize == 1) {
3601                        struct scatterlist *sg;
3602
3603                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3604                        if (!req) {
3605                                *error = "Could not allocate crypt request";
3606                                r = -ENOMEM;
3607                                goto bad;
3608                        }
3609
3610                        crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3611                        if (!crypt_iv) {
3612                                *error = "Could not allocate iv";
3613                                r = -ENOMEM;
3614                                goto bad;
3615                        }
3616
3617                        ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3618                        if (!ic->journal_xor) {
3619                                *error = "Could not allocate memory for journal xor";
3620                                r = -ENOMEM;
3621                                goto bad;
3622                        }
3623
3624                        sg = kvmalloc_array(ic->journal_pages + 1,
3625                                            sizeof(struct scatterlist),
3626                                            GFP_KERNEL);
3627                        if (!sg) {
3628                                *error = "Unable to allocate sg list";
3629                                r = -ENOMEM;
3630                                goto bad;
3631                        }
3632                        sg_init_table(sg, ic->journal_pages + 1);
3633                        for (i = 0; i < ic->journal_pages; i++) {
3634                                char *va = lowmem_page_address(ic->journal_xor[i].page);
3635                                clear_page(va);
3636                                sg_set_buf(&sg[i], va, PAGE_SIZE);
3637                        }
3638                        sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3639
3640                        skcipher_request_set_crypt(req, sg, sg,
3641                                                   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3642                        init_completion(&comp.comp);
3643                        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3644                        if (do_crypt(true, req, &comp))
3645                                wait_for_completion(&comp.comp);
3646                        kvfree(sg);
3647                        r = dm_integrity_failed(ic);
3648                        if (r) {
3649                                *error = "Unable to encrypt journal";
3650                                goto bad;
3651                        }
3652                        DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3653
3654                        crypto_free_skcipher(ic->journal_crypt);
3655                        ic->journal_crypt = NULL;
3656                } else {
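                            /*
                             * A real block cipher: allocate one skcipher
                             * request per journal section, with an IV
                             * derived by encrypting the section number.
                             */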
3657                        unsigned crypt_len = roundup(ivsize, blocksize);
3658
3659                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3660                        if (!req) {
3661                                *error = "Could not allocate crypt request";
3662                                r = -ENOMEM;
3663                                goto bad;
3664                        }
3665
3666                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3667                        if (!crypt_iv) {
3668                                *error = "Could not allocate iv";
3669                                r = -ENOMEM;
3670                                goto bad;
3671                        }
3672
3673                        crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3674                        if (!crypt_data) {
3675                                *error = "Unable to allocate crypt data";
3676                                r = -ENOMEM;
3677                                goto bad;
3678                        }
3679
3680                        ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3681                        if (!ic->journal_scatterlist) {
3682                                *error = "Unable to allocate sg list";
3683                                r = -ENOMEM;
3684                                goto bad;
3685                        }
3686                        ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3687                        if (!ic->journal_io_scatterlist) {
3688                                *error = "Unable to allocate sg list";
3689                                r = -ENOMEM;
3690                                goto bad;
3691                        }
3692                        ic->sk_requests = kvmalloc_array(ic->journal_sections,
3693                                                         sizeof(struct skcipher_request *),
3694                                                         GFP_KERNEL | __GFP_ZERO);
3695                        if (!ic->sk_requests) {
3696                                *error = "Unable to allocate sk requests";
3697                                r = -ENOMEM;
3698                                goto bad;
3699                        }
3700                        for (i = 0; i < ic->journal_sections; i++) {
3701                                struct scatterlist sg;
3702                                struct skcipher_request *section_req;
3703                                __le32 section_le = cpu_to_le32(i);
3704
3705                                memset(crypt_iv, 0x00, ivsize);
3706                                memset(crypt_data, 0x00, crypt_len);
3707                                memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3708
3709                                sg_init_one(&sg, crypt_data, crypt_len);
3710                                skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3711                                init_completion(&comp.comp);
3712                                comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3713                                if (do_crypt(true, req, &comp))
3714                                        wait_for_completion(&comp.comp);
3715
3716                                r = dm_integrity_failed(ic);
3717                                if (r) {
3718                                        *error = "Unable to generate iv";
3719                                        goto bad;
3720                                }
3721
3722                                section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3723                                if (!section_req) {
3724                                        *error = "Unable to allocate crypt request";
3725                                        r = -ENOMEM;
3726                                        goto bad;
3727                                }
3728                                section_req->iv = kmalloc_array(ivsize, 2,
3729                                                                GFP_KERNEL);
3730                                if (!section_req->iv) {
3731                                        skcipher_request_free(section_req);
3732                                        *error = "Unable to allocate iv";
3733                                        r = -ENOMEM;
3734                                        goto bad;
3735                                }
3736                                memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3737                                section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3738                                ic->sk_requests[i] = section_req;
3739                                DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3740                        }
3741                }
3742        }
3743
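            /*
             * The commit ids (possibly produced by encryption above) must
             * be pairwise distinct; on a collision, bump the id and
             * recheck it against all earlier ones.
             */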
3744        for (i = 0; i < N_COMMIT_IDS; i++) {
3745                unsigned j;
3746retest_commit_id:
3747                for (j = 0; j < i; j++) {
3748                        if (ic->commit_ids[j] == ic->commit_ids[i]) {
3749                                ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3750                                goto retest_commit_id;
3751                        }
3752                }
3753                DEBUG_print("commit id %u: %016llx\n", i, le64_to_cpu(ic->commit_ids[i]));
3754        }
3755
3756        journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3757        if (journal_tree_size > ULONG_MAX) {
3758                *error = "Journal doesn't fit into memory";
3759                r = -ENOMEM;
3760                goto bad;
3761        }
3762        ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3763        if (!ic->journal_tree) {
3764                *error = "Could not allocate memory for journal tree";
3765                r = -ENOMEM;
3766        }
3767bad:
3768        kfree(crypt_data);
3769        kfree(crypt_iv);
3770        skcipher_request_free(req);
3771
3772        return r;
3773}
3774
3775/*
3776 * Construct a integrity mapping
3777 *
3778 * Arguments:
3779 *      device
3780 *      offset from the start of the device
3781 *      tag size
3782 *      D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3783 *      number of optional arguments
3784 *      optional arguments:
3785 *              journal_sectors
3786 *              interleave_sectors
3787 *              buffer_sectors
3788 *              journal_watermark
3789 *              commit_time
3790 *              meta_device
3791 *              block_size
3792 *              sectors_per_bit
3793 *              bitmap_flush_interval
3794 *              internal_hash
3795 *              journal_crypt
3796 *              journal_mac
3797 *              recalculate
     *              allow_discards
     *              fix_padding
     *              legacy_recalculate
3798 */
3799static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3800{
3801        struct dm_integrity_c *ic;
3802        char dummy;
3803        int r;
3804        unsigned extra_args;
3805        struct dm_arg_set as;
3806        static const struct dm_arg _args[] = {
3807                {0, 16, "Invalid number of feature args"},
3808        };
3809        unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3810        bool should_write_sb;
3811        __u64 threshold;
3812        unsigned long long start;
3813        __s8 log2_sectors_per_bitmap_bit = -1;
3814        __s8 log2_blocks_per_bitmap_bit;
3815        __u64 bits_in_journal;
3816        __u64 n_bitmap_bits;
3817
3818#define DIRECT_ARGUMENTS        4
3819
3820        if (argc <= DIRECT_ARGUMENTS) {
3821                ti->error = "Invalid argument count";
3822                return -EINVAL;
3823        }
3824
3825        ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3826        if (!ic) {
3827                ti->error = "Cannot allocate integrity context";
3828                return -ENOMEM;
3829        }
3830        ti->private = ic;
3831        ti->per_io_data_size = sizeof(struct dm_integrity_io);
3832        ic->ti = ti;
3833
3834        ic->in_progress = RB_ROOT;
3835        INIT_LIST_HEAD(&ic->wait_list);
3836        init_waitqueue_head(&ic->endio_wait);
3837        bio_list_init(&ic->flush_bio_list);
3838        init_waitqueue_head(&ic->copy_to_journal_wait);
3839        init_completion(&ic->crypto_backoff);
3840        atomic64_set(&ic->number_of_mismatches, 0);
3841        ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3842
3843        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3844        if (r) {
3845                ti->error = "Device lookup failed";
3846                goto bad;
3847        }
3848
3849        if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3850                ti->error = "Invalid starting offset";
3851                r = -EINVAL;
3852                goto bad;
3853        }
3854        ic->start = start;
3855
3856        if (strcmp(argv[2], "-")) {
3857                if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3858                        ti->error = "Invalid tag size";
3859                        r = -EINVAL;
3860                        goto bad;
3861                }
3862        }
3863
3864        if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
3865            !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
3866                ic->mode = argv[3][0];
3867        } else {
3868                ti->error = "Invalid mode (expecting J, B, D, R)";
3869                r = -EINVAL;
3870                goto bad;
3871        }
3872
3873        journal_sectors = 0;
3874        interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3875        buffer_sectors = DEFAULT_BUFFER_SECTORS;
3876        journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3877        sync_msec = DEFAULT_SYNC_MSEC;
3878        ic->sectors_per_block = 1;
3879
3880        as.argc = argc - DIRECT_ARGUMENTS;
3881        as.argv = argv + DIRECT_ARGUMENTS;
3882        r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
3883        if (r)
3884                goto bad;
3885
3886        while (extra_args--) {
3887                const char *opt_string;
3888                unsigned val;
3889                unsigned long long llval;
3890                opt_string = dm_shift_arg(&as);
3891                if (!opt_string) {
3892                        r = -EINVAL;
3893                        ti->error = "Not enough feature arguments";
3894                        goto bad;
3895                }
3896                if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
3897                        journal_sectors = val ? val : 1;
3898                else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
3899                        interleave_sectors = val;
3900                else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
3901                        buffer_sectors = val;
3902                else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
3903                        journal_watermark = val;
3904                else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3905                        sync_msec = val;
3906                else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3907                        if (ic->meta_dev) {
3908                                dm_put_device(ti, ic->meta_dev);
3909                                ic->meta_dev = NULL;
3910                        }
3911                        r = dm_get_device(ti, strchr(opt_string, ':') + 1,
3912                                          dm_table_get_mode(ti->table), &ic->meta_dev);
3913                        if (r) {
3914                                ti->error = "Device lookup failed";
3915                                goto bad;
3916                        }
3917                } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
3918                        if (val < 1 << SECTOR_SHIFT ||
3919                            val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
3920                            (val & (val - 1))) {
3921                                r = -EINVAL;
3922                                ti->error = "Invalid block_size argument";
3923                                goto bad;
3924                        }
3925                        ic->sectors_per_block = val >> SECTOR_SHIFT;
3926                } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
3927                        log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
3928                } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
3929                        if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
3930                                r = -EINVAL;
3931                                ti->error = "Invalid bitmap_flush_interval argument";
                                    goto bad;
3932                        }
3933                        ic->bitmap_flush_interval = msecs_to_jiffies(val);
3934                } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3935                        r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3936                                            "Invalid internal_hash argument");
3937                        if (r)
3938                                goto bad;
3939                } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3940                        r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3941                                            "Invalid journal_crypt argument");
3942                        if (r)
3943                                goto bad;
3944                } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3945                        r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
3946                                            "Invalid journal_mac argument");
3947                        if (r)
3948                                goto bad;
3949                } else if (!strcmp(opt_string, "recalculate")) {
3950                        ic->recalculate_flag = true;
3951                } else if (!strcmp(opt_string, "allow_discards")) {
3952                        ic->discard = true;
3953                } else if (!strcmp(opt_string, "fix_padding")) {
3954                        ic->fix_padding = true;
3955                } else if (!strcmp(opt_string, "legacy_recalculate")) {
3956                        ic->legacy_recalculate = true;
3957                } else {
3958                        r = -EINVAL;
3959                        ti->error = "Invalid argument";
3960                        goto bad;
3961                }
3962        }
3963
3964        ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3965        if (!ic->meta_dev)
3966                ic->meta_device_sectors = ic->data_device_sectors;
3967        else
3968                ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3969
3970        if (!journal_sectors) {
3971                journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
3972                                      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
3973        }
3974
3975        if (!buffer_sectors)
3976                buffer_sectors = 1;
3977        ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
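            /*
             * Worked example: the default buffer_sectors of 128 gives
             * __fls(128) = 7, so metadata buffers are 1 << (9 + 7) = 64 KiB;
             * the 31 - SECTOR_SHIFT cap keeps the buffer size in bytes,
             * 1 << (SECTOR_SHIFT + log2_buffer_sectors), below 2^31.
             */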
3978
3979        r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3980                    "Invalid internal hash", "Error setting internal hash key");
3981        if (r)
3982                goto bad;
3983
3984        r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
3985                    "Invalid journal mac", "Error setting journal mac key");
3986        if (r)
3987                goto bad;
3988
3989        if (!ic->tag_size) {
3990                if (!ic->internal_hash) {
3991                        ti->error = "Unknown tag size";
3992                        r = -EINVAL;
3993                        goto bad;
3994                }
3995                ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
3996        }
3997        if (ic->tag_size > MAX_TAG_SIZE) {
3998                ti->error = "Too big tag size";
3999                r = -EINVAL;
4000                goto bad;
4001        }
4002        if (!(ic->tag_size & (ic->tag_size - 1)))
4003                ic->log2_tag_size = __ffs(ic->tag_size);
4004        else
4005                ic->log2_tag_size = -1;
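            /*
             * Examples: a 32-byte tag (sha256) is a power of two, so
             * log2_tag_size = 5 and tag offsets are computed with shifts;
             * a 20-byte tag (sha1) sets log2_tag_size = -1 and the generic
             * multiply path is used instead.
             */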
4006
4007        if (ic->mode == 'B' && !ic->internal_hash) {
4008                r = -EINVAL;
4009                ti->error = "Bitmap mode can only be used with internal hash";
4010                goto bad;
4011        }
4012
4013        if (ic->discard && !ic->internal_hash) {
4014                r = -EINVAL;
4015                ti->error = "Discard can only be used with internal hash";
4016                goto bad;
4017        }
4018
4019        ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4020        ic->autocommit_msec = sync_msec;
4021        timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4022
4023        ic->io = dm_io_client_create();
4024        if (IS_ERR(ic->io)) {
4025                r = PTR_ERR(ic->io);
4026                ic->io = NULL;
4027                ti->error = "Cannot allocate dm io";
4028                goto bad;
4029        }
4030
4031        r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4032        if (r) {
4033                ti->error = "Cannot allocate mempool";
4034                goto bad;
4035        }
4036
4037        ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4038                                          WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
4039        if (!ic->metadata_wq) {
4040                ti->error = "Cannot allocate workqueue";
4041                r = -ENOMEM;
4042                goto bad;
4043        }
4044
4045        /*
4046         * If this workqueue were percpu, it would cause bio reordering
4047         * and reduced performance.
4048         */
4049        ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4050        if (!ic->wait_wq) {
4051                ti->error = "Cannot allocate workqueue";
4052                r = -ENOMEM;
4053                goto bad;
4054        }
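            /*
             * With WQ_UNBOUND and max_active = 1, queued work items run one
             * at a time in queueing order on a single worker, so waiting
             * bios are released in the order they were queued; a percpu
             * queue could run items on several CPUs and reorder them.
             */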
4055
4056        ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4057                                          METADATA_WORKQUEUE_MAX_ACTIVE);
4058        if (!ic->offload_wq) {
4059                ti->error = "Cannot allocate workqueue";
4060                r = -ENOMEM;
4061                goto bad;
4062        }
4063
4064        ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4065        if (!ic->commit_wq) {
4066                ti->error = "Cannot allocate workqueue";
4067                r = -ENOMEM;
4068                goto bad;
4069        }
4070        INIT_WORK(&ic->commit_work, integrity_commit);
4071
4072        if (ic->mode == 'J' || ic->mode == 'B') {
4073                ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4074                if (!ic->writer_wq) {
4075                        ti->error = "Cannot allocate workqueue";
4076                        r = -ENOMEM;
4077                        goto bad;
4078                }
4079                INIT_WORK(&ic->writer_work, integrity_writer);
4080        }
4081
4082        ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4083        if (!ic->sb) {
4084                r = -ENOMEM;
4085                ti->error = "Cannot allocate superblock area";
4086                goto bad;
4087        }
4088
4089        r = sync_rw_sb(ic, REQ_OP_READ, 0);
4090        if (r) {
4091                ti->error = "Error reading superblock";
4092                goto bad;
4093        }
4094        should_write_sb = false;
4095        if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4096                if (ic->mode != 'R') {
4097                        if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4098                                r = -EINVAL;
4099                                ti->error = "The device is not initialized";
4100                                goto bad;
4101                        }
4102                }
4103
4104                r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4105                if (r) {
4106                        ti->error = "Could not initialize superblock";
4107                        goto bad;
4108                }
4109                if (ic->mode != 'R')
4110                        should_write_sb = true;
4111        }
4112
4113        if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
4114                r = -EINVAL;
4115                ti->error = "Unknown version";
4116                goto bad;
4117        }
4118        if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4119                r = -EINVAL;
4120                ti->error = "Tag size doesn't match the information in superblock";
4121                goto bad;
4122        }
4123        if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4124                r = -EINVAL;
4125                ti->error = "Block size doesn't match the information in superblock";
4126                goto bad;
4127        }
4128        if (!le32_to_cpu(ic->sb->journal_sections)) {
4129                r = -EINVAL;
4130                ti->error = "Corrupted superblock, journal_sections is 0";
4131                goto bad;
4132        }
4133        /* make sure that ti->max_io_len doesn't overflow */
4134        if (!ic->meta_dev) {
4135                if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4136                    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4137                        r = -EINVAL;
4138                        ti->error = "Invalid interleave_sectors in the superblock";
4139                        goto bad;
4140                }
4141        } else {
4142                if (ic->sb->log2_interleave_sectors) {
4143                        r = -EINVAL;
4144                        ti->error = "Invalid interleave_sectors in the superblock";
4145                        goto bad;
4146                }
4147        }
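            /*
             * Bounds arithmetic: ti->max_io_len is later set to
             * 1U << log2_interleave_sectors, and the maximum allowed value
             * of 31 gives 0x80000000, which still fits in the 32-bit
             * max_io_len; the minimum of 3 keeps one interleave run at
             * least MAX_SECTORS_PER_BLOCK (8) sectors long.
             */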
4148        if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4149                r = -EINVAL;
4150                ti->error = "Journal mac mismatch";
4151                goto bad;
4152        }
4153
4154        get_provided_data_sectors(ic);
4155        if (!ic->provided_data_sectors) {
4156                r = -EINVAL;
4157                ti->error = "The device is too small";
4158                goto bad;
4159        }
4160
4161try_smaller_buffer:
4162        r = calculate_device_limits(ic);
4163        if (r) {
4164                if (ic->meta_dev) {
4165                        if (ic->log2_buffer_sectors > 3) {
4166                                ic->log2_buffer_sectors--;
4167                                goto try_smaller_buffer;
4168                        }
4169                }
4170                ti->error = "The device is too small";
4171                goto bad;
4172        }
4173
4174        if (log2_sectors_per_bitmap_bit < 0)
4175                log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4176        if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4177                log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4178
4179        bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4180        if (bits_in_journal > UINT_MAX)
4181                bits_in_journal = UINT_MAX;
4182        while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4183                log2_sectors_per_bitmap_bit++;
4184
4185        log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4186        ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4187        if (should_write_sb) {
4188                ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4189        }
4190        n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4191                                + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4192        ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
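            /*
             * Worked example (made-up geometry): provided_data_sectors =
             * 2^21 (1 GiB of 512-byte sectors), log2_sectors_per_block = 0
             * and the default 32768 sectors per bitmap bit (log2 = 15) give
             * n_bitmap_bits = 2^21 >> 15 = 64; a 4096-byte bitmap block
             * holds 32768 bits, so n_bitmap_blocks = 1.
             */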
4193
4194        if (!ic->meta_dev)
4195                ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4196
4197        if (ti->len > ic->provided_data_sectors) {
4198                r = -EINVAL;
4199                ti->error = "Not enough provided sectors for requested mapping size";
4200                goto bad;
4201        }
4202
4204        threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4205        threshold += 50;
4206        do_div(threshold, 100);
4207        ic->free_sectors_threshold = threshold;
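            /*
             * Example with the default journal_watermark of 50: for
             * ic->journal_entries = 10000, threshold = (10000 * (100 - 50)
             * + 50) / 100 = 5000, so writeback is prodded once free journal
             * space falls below about half (the + 50 rounds the division to
             * the nearest integer).
             */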
4208
4209        DEBUG_print("initialized:\n");
4210        DEBUG_print("   integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4211        DEBUG_print("   journal_entry_size %u\n", ic->journal_entry_size);
4212        DEBUG_print("   journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4213        DEBUG_print("   journal_section_entries %u\n", ic->journal_section_entries);
4214        DEBUG_print("   journal_section_sectors %u\n", ic->journal_section_sectors);
4215        DEBUG_print("   journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
4216        DEBUG_print("   journal_entries %u\n", ic->journal_entries);
4217        DEBUG_print("   log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4218        DEBUG_print("   data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
4219        DEBUG_print("   initial_sectors 0x%x\n", ic->initial_sectors);
4220        DEBUG_print("   metadata_run 0x%x\n", ic->metadata_run);
4221        DEBUG_print("   log2_metadata_run %d\n", ic->log2_metadata_run);
4222        DEBUG_print("   provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4223        DEBUG_print("   log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4224        DEBUG_print("   bits_in_journal %llu\n", bits_in_journal);
4225
4226        if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4227                ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4228                ic->sb->recalc_sector = cpu_to_le64(0);
4229        }
4230
4231        if (ic->internal_hash) {
4232                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4233                if (!ic->recalc_wq) {
4234                        ti->error = "Cannot allocate workqueue";
4235                        r = -ENOMEM;
4236                        goto bad;
4237                }
4238                INIT_WORK(&ic->recalc_work, integrity_recalc);
4239                ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
4240                if (!ic->recalc_buffer) {
4241                        ti->error = "Cannot allocate buffer for recalculating";
4242                        r = -ENOMEM;
4243                        goto bad;
4244                }
4245                ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
4246                                                 ic->tag_size, GFP_KERNEL);
4247                if (!ic->recalc_tags) {
4248                        ti->error = "Cannot allocate tags for recalculating";
4249                        r = -ENOMEM;
4250                        goto bad;
4251                }
4252        } else {
4253                if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
4254                        ti->error = "Recalculate can only be specified with internal_hash";
4255                        r = -EINVAL;
4256                        goto bad;
4257                }
4258        }
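            /*
             * Sizing note for the recalculation path above: RECALC_SECTORS
             * is 8192, so recalc_buffer is 8192 << 9 = 4 MiB; with 512-byte
             * blocks and a 32-byte tag, recalc_tags needs 8192 * 32 =
             * 256 KiB, and the tag array shrinks as log2_sectors_per_block
             * grows.
             */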
4259
4260        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
4261            le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
4262            dm_integrity_disable_recalculate(ic)) {
4263                ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
4264                r = -EOPNOTSUPP;
4265                goto bad;
4266        }
4267
4268        ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4269                        1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
4270        if (IS_ERR(ic->bufio)) {
4271                r = PTR_ERR(ic->bufio);
4272                ti->error = "Cannot initialize dm-bufio";
4273                ic->bufio = NULL;
4274                goto bad;
4275        }
4276        dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4277
4278        if (ic->mode != 'R') {
4279                r = create_journal(ic, &ti->error);
4280                if (r)
4281                        goto bad;
4283        }
4284
4285        if (ic->mode == 'B') {
4286                unsigned i;
4287                unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4288
4289                ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4290                if (!ic->recalc_bitmap) {
4291                        r = -ENOMEM;
4292                        goto bad;
4293                }
4294                ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4295                if (!ic->may_write_bitmap) {
4296                        r = -ENOMEM;
4297                        goto bad;
4298                }
4299                ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4300                if (!ic->bbs) {
4301                        r = -ENOMEM;
4302                        goto bad;
4303                }
4304                INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4305                for (i = 0; i < ic->n_bitmap_blocks; i++) {
4306                        struct bitmap_block_status *bbs = &ic->bbs[i];
4307                        unsigned sector, pl_index, pl_offset;
4308
4309                        INIT_WORK(&bbs->work, bitmap_block_work);
4310                        bbs->ic = ic;
4311                        bbs->idx = i;
4312                        bio_list_init(&bbs->bio_queue);
4313                        spin_lock_init(&bbs->bio_queue_lock);
4314
4315                        sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4316                        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4317                        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4318
4319                        bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4320                }
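                    /*
                     * Mapping arithmetic, worked for 4 KiB pages: bitmap
                     * block i starts at sector i * 8, so pl_index =
                     * (i * 8) >> 3 = i and pl_offset = (i * 4096) & 4095 = 0,
                     * one bitmap block per page; with 64 KiB pages sixteen
                     * blocks share each page at 4096-byte offsets.
                     */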
4321        }
4322
4323        if (should_write_sb) {
4326                init_journal(ic, 0, ic->journal_sections, 0);
4327                r = dm_integrity_failed(ic);
4328                if (unlikely(r)) {
4329                        ti->error = "Error initializing journal";
4330                        goto bad;
4331                }
4332                r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4333                if (r) {
4334                        ti->error = "Error initializing superblock";
4335                        goto bad;
4336                }
4337                ic->just_formatted = true;
4338        }
4339
4340        if (!ic->meta_dev) {
4341                r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4342                if (r)
4343                        goto bad;
4344        }
4345        if (ic->mode == 'B') {
4346                unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4347                if (!max_io_len)
4348                        max_io_len = 1U << 31;
4349                DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4350                if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4351                        r = dm_set_target_max_io_len(ti, max_io_len);
4352                        if (r)
4353                                goto bad;
4354                }
4355        }
4356
4357        if (!ic->internal_hash)
4358                dm_integrity_set(ti, ic);
4359
4360        ti->num_flush_bios = 1;
4361        ti->flush_supported = true;
4362        if (ic->discard)
4363                ti->num_discard_bios = 1;
4364
4365        return 0;
4366
4367bad:
4368        dm_integrity_dtr(ti);
4369        return r;
4370}
4371
4372static void dm_integrity_dtr(struct dm_target *ti)
4373{
4374        struct dm_integrity_c *ic = ti->private;
4375
4376        BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4377        BUG_ON(!list_empty(&ic->wait_list));
4378
4379        if (ic->metadata_wq)
4380                destroy_workqueue(ic->metadata_wq);
4381        if (ic->wait_wq)
4382                destroy_workqueue(ic->wait_wq);
4383        if (ic->offload_wq)
4384                destroy_workqueue(ic->offload_wq);
4385        if (ic->commit_wq)
4386                destroy_workqueue(ic->commit_wq);
4387        if (ic->writer_wq)
4388                destroy_workqueue(ic->writer_wq);
4389        if (ic->recalc_wq)
4390                destroy_workqueue(ic->recalc_wq);
4391        vfree(ic->recalc_buffer);
4392        kvfree(ic->recalc_tags);
4393        kvfree(ic->bbs);
4394        if (ic->bufio)
4395                dm_bufio_client_destroy(ic->bufio);
4396        mempool_exit(&ic->journal_io_mempool);
4397        if (ic->io)
4398                dm_io_client_destroy(ic->io);
4399        if (ic->dev)
4400                dm_put_device(ti, ic->dev);
4401        if (ic->meta_dev)
4402                dm_put_device(ti, ic->meta_dev);
4403        dm_integrity_free_page_list(ic->journal);
4404        dm_integrity_free_page_list(ic->journal_io);
4405        dm_integrity_free_page_list(ic->journal_xor);
4406        dm_integrity_free_page_list(ic->recalc_bitmap);
4407        dm_integrity_free_page_list(ic->may_write_bitmap);
4408        if (ic->journal_scatterlist)
4409                dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4410        if (ic->journal_io_scatterlist)
4411                dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4412        if (ic->sk_requests) {
4413                unsigned i;
4414
4415                for (i = 0; i < ic->journal_sections; i++) {
4416                        struct skcipher_request *req = ic->sk_requests[i];
4417                        if (req) {
4418                                kfree_sensitive(req->iv);
4419                                skcipher_request_free(req);
4420                        }
4421                }
4422                kvfree(ic->sk_requests);
4423        }
4424        kvfree(ic->journal_tree);
4425        if (ic->sb)
4426                free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4427
4428        if (ic->internal_hash)
4429                crypto_free_shash(ic->internal_hash);
4430        free_alg(&ic->internal_hash_alg);
4431
4432        if (ic->journal_crypt)
4433                crypto_free_skcipher(ic->journal_crypt);
4434        free_alg(&ic->journal_crypt_alg);
4435
4436        if (ic->journal_mac)
4437                crypto_free_shash(ic->journal_mac);
4438        free_alg(&ic->journal_mac_alg);
4439
4440        kfree(ic);
4441}
4442
4443static struct target_type integrity_target = {
4444        .name                   = "integrity",
4445        .version                = {1, 6, 0},
4446        .module                 = THIS_MODULE,
4447        .features               = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4448        .ctr                    = dm_integrity_ctr,
4449        .dtr                    = dm_integrity_dtr,
4450        .map                    = dm_integrity_map,
4451        .postsuspend            = dm_integrity_postsuspend,
4452        .resume                 = dm_integrity_resume,
4453        .status                 = dm_integrity_status,
4454        .iterate_devices        = dm_integrity_iterate_devices,
4455        .io_hints               = dm_integrity_io_hints,
4456};
4457
4458static int __init dm_integrity_init(void)
4459{
4460        int r;
4461
4462        journal_io_cache = kmem_cache_create("integrity_journal_io",
4463                                             sizeof(struct journal_io), 0, 0, NULL);
4464        if (!journal_io_cache) {
4465                DMERR("can't allocate journal io cache");
4466                return -ENOMEM;
4467        }
4468
4469        r = dm_register_target(&integrity_target);
4470
4471        if (r < 0)
4472                DMERR("register failed %d", r);
4473
4474        return r;
4475}
4476
4477static void __exit dm_integrity_exit(void)
4478{
4479        dm_unregister_target(&integrity_target);
4480        kmem_cache_destroy(journal_io_cache);
4481}
4482
4483module_init(dm_integrity_init);
4484module_exit(dm_integrity_exit);
4485
4486MODULE_AUTHOR("Milan Broz");
4487MODULE_AUTHOR("Mikulas Patocka");
4488MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4489MODULE_LICENSE("GPL");
4490