linux/drivers/md/raid5-ppl.c
/*
 * Partial Parity Log for closing the RAID5 write hole
 * Copyright (c) 2017, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"
#include "raid5-log.h"

/*
 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
 * partial parity data. The header contains an array of entries
 * (struct ppl_header_entry) which describe the logged write requests.
 * Partial parity for the entries comes after the header, written in the same
 * sequence as the entries:
 *
 * Header
 *   entry0
 *   ...
 *   entryN
 * PP data
 *   PP for entry0
 *   ...
 *   PP for entryN
 *
 * An entry describes one or more consecutive stripe_heads, up to a full
 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
 * number of stripe_heads in the entry and n is the number of modified data
 * disks. Every stripe_head in the entry must write to the same data disks.
 * An example of a valid case described by a single entry (writes to the first
 * stripe of a 4 disk array, 16k chunk size):
 *
 * sh->sector   dd0   dd1   dd2    ppl
 *            +-----+-----+-----+
 * 0          | --- | --- | --- | +----+
 * 8          | -W- | -W- | --- | | pp |   data_sector = 8
 * 16         | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
 * 24         | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
 *            +-----+-----+-----+ +----+
 *
 * data_sector is the first raid sector of the modified data, data_size is the
 * total size of modified data and pp_size is the size of partial parity for
 * this entry. Entries for full stripe writes contain no partial parity
 * (pp_size = 0); they only mark the stripes for which parity should be
 * recalculated after an unclean shutdown. Every entry holds a checksum of its
 * partial parity; the header also has a checksum of the header itself.
 *
 * A write request is always logged to the PPL instance stored on the parity
 * disk of the corresponding stripe. For each member disk there is one ppl_log
 * used to handle logging for this disk, independently from others. They are
 * grouped in the child_logs array in struct ppl_conf, which is assigned to
 * r5conf->log_private.
 *
 * ppl_io_unit represents a full PPL write, header_page contains the ppl_header.
 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
 * can be appended to the last entry if it meets the conditions for a valid
 * entry described above, otherwise a new entry is added. Checksums of entries
 * are calculated incrementally as stripes containing partial parity are being
 * added. ppl_submit_iounit() calculates the checksum of the header and submits
 * a bio containing the header page and partial parity pages (sh->ppl_page) for
 * all stripes of the io_unit. When the PPL write completes, the stripes
 * associated with the io_unit are released and raid5d starts writing their data
 * and parity. When all stripes are written, the io_unit is freed and the next
 * can be submitted.
 *
 * An io_unit is used to gather stripes until it is submitted or becomes full
 * (if the maximum number of entries or size of PPL is reached). Another io_unit
 * can't be submitted until the previous has completed (PPL and stripe
 * data+parity is written). The log->io_list tracks all io_units of a log
 * (for a single member disk). New io_units are added to the end of the list
 * and the first io_unit is submitted if it is not submitted already.
 * The current io_unit accepting new stripes is always at the end of the list.
 *
 * If write-back cache is enabled for any of the disks in the array, its data
 * must be flushed before the next io_unit is submitted.
 */

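/*
 * Per-io_unit partial parity space used when the reserved PPL area is large
 * enough to hold more than one PPL (see ppl_init_child_log()).
 */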
#define PPL_SPACE_SIZE (128 * 1024)

struct ppl_conf {
        struct mddev *mddev;

        /* array of child logs, one for each raid disk */
        struct ppl_log *child_logs;
        int count;

        int block_size;         /* the logical block size used for data_sector
                                 * in ppl_header_entry */
        u32 signature;          /* raid array identifier */
        atomic64_t seq;         /* current log write sequence number */

        struct kmem_cache *io_kc;
        mempool_t io_pool;
        struct bio_set bs;
        struct bio_set flush_bs;

        /* used only for recovery */
        int recovered_entries;
        int mismatch_count;

        /* stripes to retry if failed to allocate io_unit */
        struct list_head no_mem_stripes;
        spinlock_t no_mem_stripes_lock;

        unsigned short write_hint;
};

struct ppl_log {
        struct ppl_conf *ppl_conf;      /* shared between all log instances */

        struct md_rdev *rdev;           /* array member disk associated with
                                         * this log instance */
        struct mutex io_mutex;
        struct ppl_io_unit *current_io; /* current io_unit accepting new data
                                         * always at the end of io_list */
        spinlock_t io_list_lock;
        struct list_head io_list;       /* all io_units of this log */

        sector_t next_io_sector;
        unsigned int entry_space;
        bool use_multippl;
        bool wb_cache_on;
        unsigned long disk_flush_bitmap;
};

#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
        struct ppl_log *log;

        struct page *header_page;       /* for ppl_header */

        unsigned int entries_count;     /* number of entries in ppl_header */
        unsigned int pp_size;           /* current total size of partial parity */

        u64 seq;                        /* sequence number of this log write */
        struct list_head log_sibling;   /* log->io_list */

        struct list_head stripe_list;   /* stripes added to the io_unit */
        atomic_t pending_stripes;       /* how many stripes not written to raid */
        atomic_t pending_flushes;       /* how many disk flushes are in progress */

        bool submitted;                 /* true if write to log started */

        /* inline bio and its biovec for submitting the iounit */
        struct bio bio;
        struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};

struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
                       struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        struct page **srcs = percpu->scribble;
        int count = 0, pd_idx = sh->pd_idx, i;
        struct async_submit_ctl submit;

        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

        /*
         * Partial parity is the XOR of stripe data chunks that are not changed
         * during the write request. Depending on available data
         * (read-modify-write vs. reconstruct-write case) we calculate it
         * differently.
         */
        if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
                /*
                 * rmw: xor old data and parity from updated disks
                 * This is calculated earlier by ops_run_prexor5() so just copy
                 * the parity dev page.
                 */
                srcs[count++] = sh->dev[pd_idx].page;
        } else if (sh->reconstruct_state == reconstruct_state_drain_run) {
                /* rcw: xor data from all not updated disks */
                for (i = disks; i--;) {
                        struct r5dev *dev = &sh->dev[i];
                        if (test_bit(R5_UPTODATE, &dev->flags))
                                srcs[count++] = dev->page;
                }
        } else {
                return tx;
        }

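        /*
         * The tail of the scribble buffer, past the source list and its two
         * extra slots, is handed to async_tx as scratch space for dma
         * address conversion.
         */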
        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
                          NULL, sh, (void *) (srcs + sh->disks + 2));

        if (count == 1)
                tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
                                  &submit);
        else
                tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
                               &submit);

        return tx;
}

static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *kc = pool_data;
        struct ppl_io_unit *io;

        io = kmem_cache_alloc(kc, gfp_mask);
        if (!io)
                return NULL;

        io->header_page = alloc_page(gfp_mask);
        if (!io->header_page) {
                kmem_cache_free(kc, io);
                return NULL;
        }

        return io;
}

static void ppl_io_pool_free(void *element, void *pool_data)
{
        struct kmem_cache *kc = pool_data;
        struct ppl_io_unit *io = element;

        __free_page(io->header_page);
        kmem_cache_free(kc, io);
}

static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
                                          struct stripe_head *sh)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct ppl_io_unit *io;
        struct ppl_header *pplhdr;
        struct page *header_page;

        io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
        if (!io)
                return NULL;

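        /*
         * io comes from the mempool with header_page preallocated by
         * ppl_io_pool_alloc(), so preserve the page across the memset.
         */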
        header_page = io->header_page;
        memset(io, 0, sizeof(*io));
        io->header_page = header_page;

        io->log = log;
        INIT_LIST_HEAD(&io->log_sibling);
        INIT_LIST_HEAD(&io->stripe_list);
        atomic_set(&io->pending_stripes, 0);
        atomic_set(&io->pending_flushes, 0);
        bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);

        pplhdr = page_address(io->header_page);
        clear_page(pplhdr);
        memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
        pplhdr->signature = cpu_to_le32(ppl_conf->signature);

        io->seq = atomic64_add_return(1, &ppl_conf->seq);
        pplhdr->generation = cpu_to_le64(io->seq);

        return io;
}

static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
{
        struct ppl_io_unit *io = log->current_io;
        struct ppl_header_entry *e = NULL;
        struct ppl_header *pplhdr;
        int i;
        sector_t data_sector = 0;
        int data_disks = 0;
        struct r5conf *conf = sh->raid_conf;

        pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);

        /* check if current io_unit is full */
        if (io && (io->pp_size == log->entry_space ||
                   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
                pr_debug("%s: add io_unit blocked by seq: %llu\n",
                         __func__, io->seq);
                io = NULL;
        }

        /* add a new unit if there is none or the current is full */
        if (!io) {
                io = ppl_new_iounit(log, sh);
                if (!io)
                        return -ENOMEM;
                spin_lock_irq(&log->io_list_lock);
                list_add_tail(&io->log_sibling, &log->io_list);
                spin_unlock_irq(&log->io_list_lock);

                log->current_io = io;
        }

        for (i = 0; i < sh->disks; i++) {
                struct r5dev *dev = &sh->dev[i];

                if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
                        if (!data_disks || dev->sector < data_sector)
                                data_sector = dev->sector;
                        data_disks++;
                }
        }
        BUG_ON(!data_disks);

        pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
                 io->seq, (unsigned long long)data_sector, data_disks);

        pplhdr = page_address(io->header_page);

        if (io->entries_count > 0) {
                struct ppl_header_entry *last =
                                &pplhdr->entries[io->entries_count - 1];
                struct stripe_head *sh_last = list_last_entry(
                                &io->stripe_list, struct stripe_head, log_list);
                u64 data_sector_last = le64_to_cpu(last->data_sector);
                u32 data_size_last = le32_to_cpu(last->data_size);

                /*
                 * Check if we can append the stripe to the last entry. It must
                 * be just after the last logged stripe and write to the same
                 * disks. Use bit shift and logarithm to avoid 64-bit division.
                 */
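                /*
                 * Worked example (illustrative numbers, assuming 4k pages and
                 * 512-byte sectors): a previous stripe_head writing to 2 data
                 * disks logged data_size_last = 2 * 4k, i.e. 16 sectors. A
                 * stripe_head starting STRIPE_SECTORS = 8 sectors later gives
                 * (8 - 0) * 2 == 16, so it can extend the entry.
                 */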
                if ((sh->sector == sh_last->sector + STRIPE_SECTORS) &&
                    (data_sector >> ilog2(conf->chunk_sectors) ==
                     data_sector_last >> ilog2(conf->chunk_sectors)) &&
                    ((data_sector - data_sector_last) * data_disks ==
                     data_size_last >> 9))
                        e = last;
        }

        if (!e) {
                e = &pplhdr->entries[io->entries_count++];
                e->data_sector = cpu_to_le64(data_sector);
                e->parity_disk = cpu_to_le32(sh->pd_idx);
                e->checksum = cpu_to_le32(~0);
        }

        le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);

        /* don't write any PP if full stripe write */
        if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
                le32_add_cpu(&e->pp_size, PAGE_SIZE);
                io->pp_size += PAGE_SIZE;
                e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
                                                    page_address(sh->ppl_page),
                                                    PAGE_SIZE));
        }

        list_add_tail(&sh->log_list, &io->stripe_list);
        atomic_inc(&io->pending_stripes);
        sh->ppl_io = io;

        return 0;
}

int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_io_unit *io = sh->ppl_io;
        struct ppl_log *log;

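        /*
         * Skip logging if this stripe is already logged, is being synced, has
         * no partial parity page, or its parity disk is not being written or
         * is not in sync.
         */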
        if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
            !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
            !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
                clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
                return -EAGAIN;
        }

        log = &ppl_conf->child_logs[sh->pd_idx];

        mutex_lock(&log->io_mutex);

        if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
                mutex_unlock(&log->io_mutex);
                return -EAGAIN;
        }

        set_bit(STRIPE_LOG_TRAPPED, &sh->state);
        clear_bit(STRIPE_DELAYED, &sh->state);
        atomic_inc(&sh->count);

        if (ppl_log_stripe(log, sh)) {
                spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
                list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
                spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
        }

        mutex_unlock(&log->io_mutex);

        return 0;
}

static void ppl_log_endio(struct bio *bio)
{
        struct ppl_io_unit *io = bio->bi_private;
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct stripe_head *sh, *next;

        pr_debug("%s: seq: %llu\n", __func__, io->seq);

        if (bio->bi_status)
                md_error(ppl_conf->mddev, log->rdev);

        list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
                list_del_init(&sh->log_list);

                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
}

static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
        char b[BDEVNAME_SIZE];

        pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
                 __func__, io->seq, bio->bi_iter.bi_size,
                 (unsigned long long)bio->bi_iter.bi_sector,
                 bio_devname(bio, b));

        submit_bio(bio);
}

static void ppl_submit_iounit(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct ppl_header *pplhdr = page_address(io->header_page);
        struct bio *bio = &io->bio;
        struct stripe_head *sh;
        int i;

        bio->bi_private = io;

        if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
                ppl_log_endio(bio);
                return;
        }

        for (i = 0; i < io->entries_count; i++) {
                struct ppl_header_entry *e = &pplhdr->entries[i];

                pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
                         __func__, io->seq, i, le64_to_cpu(e->data_sector),
                         le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));

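                /*
                 * On disk, data_sector is stored in units of
                 * ppl_conf->block_size and the entry checksum is stored
                 * bit-inverted, matching the crc32c seed and final inversion
                 * used when it is verified during recovery.
                 */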
                e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
                                             ilog2(ppl_conf->block_size >> 9));
                e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
        }

        pplhdr->entries_count = cpu_to_le32(io->entries_count);
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));

        /* Rewind the log if the current PPL is larger than the remaining space */
        if (log->use_multippl &&
            log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
            (PPL_HEADER_SIZE + io->pp_size) >> 9)
                log->next_io_sector = log->rdev->ppl.sector;

        bio->bi_end_io = ppl_log_endio;
        bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
        bio_set_dev(bio, log->rdev->bdev);
        bio->bi_iter.bi_sector = log->next_io_sector;
        bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
        bio->bi_write_hint = ppl_conf->write_hint;

        pr_debug("%s: log->current_io_sector: %llu\n", __func__,
            (unsigned long long)log->next_io_sector);

        if (log->use_multippl)
                log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

        WARN_ON(log->disk_flush_bitmap != 0);

        list_for_each_entry(sh, &io->stripe_list, log_list) {
                for (i = 0; i < sh->disks; i++) {
                        struct r5dev *dev = &sh->dev[i];

                        if ((ppl_conf->child_logs[i].wb_cache_on) &&
                            (test_bit(R5_Wantwrite, &dev->flags))) {
                                set_bit(i, &log->disk_flush_bitmap);
                        }
                }

                /* entries for full stripe writes have no partial parity */
                if (test_bit(STRIPE_FULL_WRITE, &sh->state))
                        continue;

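                /*
                 * The inline bio is full: allocate another bio, chain it so
                 * the io_unit's endio runs only after all parts complete,
                 * and submit the part that is already filled.
                 */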
                if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
                        struct bio *prev = bio;

                        bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
                                               &ppl_conf->bs);
                        bio->bi_opf = prev->bi_opf;
                        bio->bi_write_hint = prev->bi_write_hint;
                        bio_copy_dev(bio, prev);
                        bio->bi_iter.bi_sector = bio_end_sector(prev);
                        bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);

                        bio_chain(bio, prev);
                        ppl_submit_iounit_bio(io, prev);
                }
        }

        ppl_submit_iounit_bio(io, bio);
}

static void ppl_submit_current_io(struct ppl_log *log)
{
        struct ppl_io_unit *io;

        spin_lock_irq(&log->io_list_lock);

        io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
                                      log_sibling);
        if (io && io->submitted)
                io = NULL;

        spin_unlock_irq(&log->io_list_lock);

        if (io) {
                io->submitted = true;

                if (io == log->current_io)
                        log->current_io = NULL;

                ppl_submit_iounit(io);
        }
}

void ppl_write_stripe_run(struct r5conf *conf)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_log *log;
        int i;

        for (i = 0; i < ppl_conf->count; i++) {
                log = &ppl_conf->child_logs[i];

                mutex_lock(&log->io_mutex);
                ppl_submit_current_io(log);
                mutex_unlock(&log->io_mutex);
        }
}

static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private;
        unsigned long flags;

        pr_debug("%s: seq: %llu\n", __func__, io->seq);

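        /*
         * Disable interrupts once around both list locks instead of taking
         * each as an irq-disabling lock separately.
         */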
        local_irq_save(flags);

        spin_lock(&log->io_list_lock);
        list_del(&io->log_sibling);
        spin_unlock(&log->io_list_lock);

        mempool_free(io, &ppl_conf->io_pool);

        spin_lock(&ppl_conf->no_mem_stripes_lock);
        if (!list_empty(&ppl_conf->no_mem_stripes)) {
                struct stripe_head *sh;

                sh = list_first_entry(&ppl_conf->no_mem_stripes,
                                      struct stripe_head, log_list);
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
        spin_unlock(&ppl_conf->no_mem_stripes_lock);

        local_irq_restore(flags);

        wake_up(&conf->wait_for_quiescent);
}

static void ppl_flush_endio(struct bio *bio)
{
        struct ppl_io_unit *io = bio->bi_private;
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private;
        char b[BDEVNAME_SIZE];

        pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));

        if (bio->bi_status) {
                struct md_rdev *rdev;

                rcu_read_lock();
                rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
                if (rdev)
                        md_error(rdev->mddev, rdev);
                rcu_read_unlock();
        }

        bio_put(bio);

        if (atomic_dec_and_test(&io->pending_flushes)) {
                ppl_io_unit_finished(io);
                md_wakeup_thread(conf->mddev->thread);
        }
}

static void ppl_do_flush(struct ppl_io_unit *io)
{
        struct ppl_log *log = io->log;
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct r5conf *conf = ppl_conf->mddev->private;
        int raid_disks = conf->raid_disks;
        int flushed_disks = 0;
        int i;

        atomic_set(&io->pending_flushes, raid_disks);

        for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
                struct md_rdev *rdev;
                struct block_device *bdev = NULL;

                rcu_read_lock();
                rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags))
                        bdev = rdev->bdev;
                rcu_read_unlock();

                if (bdev) {
                        struct bio *bio;
                        char b[BDEVNAME_SIZE];

                        bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
                        bio_set_dev(bio, bdev);
                        bio->bi_private = io;
                        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                        bio->bi_end_io = ppl_flush_endio;

                        pr_debug("%s: dev: %s\n", __func__,
                                 bio_devname(bio, b));

                        submit_bio(bio);
                        flushed_disks++;
                }
        }

        log->disk_flush_bitmap = 0;

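        /*
         * pending_flushes was preset to raid_disks; drop the surplus
         * references for disks that did not get a flush bio so the io_unit
         * is finished once all submitted flushes complete.
         */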
        for (i = flushed_disks; i < raid_disks; i++) {
                if (atomic_dec_and_test(&io->pending_flushes))
                        ppl_io_unit_finished(io);
        }
}

static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
                                            struct ppl_log *log)
{
        struct ppl_io_unit *io;

        io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
                                      log_sibling);

        return !io || !io->submitted;
}

void ppl_quiesce(struct r5conf *conf, int quiesce)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        int i;

        if (quiesce) {
                for (i = 0; i < ppl_conf->count; i++) {
                        struct ppl_log *log = &ppl_conf->child_logs[i];

                        spin_lock_irq(&log->io_list_lock);
                        wait_event_lock_irq(conf->wait_for_quiescent,
                                            ppl_no_io_unit_submitted(conf, log),
                                            log->io_list_lock);
                        spin_unlock_irq(&log->io_list_lock);
                }
        }
}

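/*
 * Complete zero-length flush requests immediately; for bios that also carry
 * data, clear the PREFLUSH flag and let the caller process them normally.
 */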
int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
        if (bio->bi_iter.bi_size == 0) {
                bio_endio(bio);
                return 0;
        }
        bio->bi_opf &= ~REQ_PREFLUSH;
        return -EAGAIN;
}

void ppl_stripe_write_finished(struct stripe_head *sh)
{
        struct ppl_io_unit *io;

        io = sh->ppl_io;
        sh->ppl_io = NULL;

        if (io && atomic_dec_and_test(&io->pending_stripes)) {
                if (io->log->disk_flush_bitmap)
                        ppl_do_flush(io);
                else
                        ppl_io_unit_finished(io);
        }
}

static void ppl_xor(int size, struct page *page1, struct page *page2)
{
        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;
        struct page *xor_srcs[] = { page1, page2 };

        init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
                          NULL, NULL, NULL, NULL);
        tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);

        async_tx_quiesce(&tx);
}

/*
 * PPL recovery strategy: xor partial parity and data from all modified data
 * disks within a stripe and write the result as the new stripe parity. If all
 * stripe data disks are modified (full stripe write), no partial parity is
 * available, so just xor the data disks.
 *
 * Recovery of a PPL entry shall occur only if all modified data disks are
 * available and reading from all of them succeeds.
 *
 * A PPL entry applies to a stripe; partial parity size for an entry is at most
 * the size of the chunk. Examples of possible cases for a single entry:
 *
 * case 0: single data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size
 *
 * case 1: more than one data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size / modified_data_disks
 *
 * case 2: write to all data disks (also full stripe write):
 *   data0    data1    data2                parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ |           | (no change)        |
 * | -data- | -data- | -data- | --------> | xor all data       |
 * | ------ | ------ | ------ | --------> | (no change)        |
 * | ------ | ------ | ------ |           | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = 0
 *
 * The following cases are possible only in other implementations. The recovery
 * code can handle them, but they are not generated at runtime because they can
 * be reduced to cases 0, 1 and 2:
 *
 * case 3:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
 * | -data- | -data- | -data- | | -- | -> | xor all data       |
 * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 *
 * case 4:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 */
static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
                             sector_t ppl_sector)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct mddev *mddev = ppl_conf->mddev;
        struct r5conf *conf = mddev->private;
        int block_size = ppl_conf->block_size;
        struct page *page1;
        struct page *page2;
        sector_t r_sector_first;
        sector_t r_sector_last;
        int strip_sectors;
        int data_disks;
        int i;
        int ret = 0;
        char b[BDEVNAME_SIZE];
        unsigned int pp_size = le32_to_cpu(e->pp_size);
        unsigned int data_size = le32_to_cpu(e->data_size);

        page1 = alloc_page(GFP_KERNEL);
        page2 = alloc_page(GFP_KERNEL);

        if (!page1 || !page2) {
                ret = -ENOMEM;
                goto out;
        }

        r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);

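        /*
         * Work out the array sector range covered by this entry. If partial
         * parity covers less than a full chunk, derive the number of modified
         * data disks and the strip length from pp_size and data_size
         * (cases 0-2 above); otherwise the entry spans whole chunks on all
         * data disks (cases 3 and 4).
         */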
        if ((pp_size >> 9) < conf->chunk_sectors) {
                if (pp_size > 0) {
                        data_disks = data_size / pp_size;
                        strip_sectors = pp_size >> 9;
                } else {
                        data_disks = conf->raid_disks - conf->max_degraded;
                        strip_sectors = (data_size >> 9) / data_disks;
                }
                r_sector_last = r_sector_first +
                                (data_disks - 1) * conf->chunk_sectors +
                                strip_sectors;
        } else {
                data_disks = conf->raid_disks - conf->max_degraded;
                strip_sectors = conf->chunk_sectors;
                r_sector_last = r_sector_first + (data_size >> 9);
        }

        pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
                 (unsigned long long)r_sector_first,
                 (unsigned long long)r_sector_last);

        /* if start and end are 4k aligned, use a 4k block */
        if (block_size == 512 &&
            (r_sector_first & (STRIPE_SECTORS - 1)) == 0 &&
            (r_sector_last & (STRIPE_SECTORS - 1)) == 0)
                block_size = STRIPE_SIZE;

        /* iterate through blocks in strip */
        for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
                bool update_parity = false;
                sector_t parity_sector;
                struct md_rdev *parity_rdev;
                struct stripe_head sh;
                int disk;
                int indent = 0;

                pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
                indent += 2;

                memset(page_address(page1), 0, PAGE_SIZE);

                /* iterate through data member disks */
                for (disk = 0; disk < data_disks; disk++) {
                        int dd_idx;
                        struct md_rdev *rdev;
                        sector_t sector;
                        sector_t r_sector = r_sector_first + i +
                                            (disk * conf->chunk_sectors);

                        pr_debug("%s:%*s data member disk %d start\n",
                                 __func__, indent, "", disk);
                        indent += 2;

                        if (r_sector >= r_sector_last) {
                                pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
                                         __func__, indent, "",
                                         (unsigned long long)r_sector);
                                indent -= 2;
                                continue;
                        }

                        update_parity = true;

                        /* map raid sector to member disk */
                        sector = raid5_compute_sector(conf, r_sector, 0,
                                                      &dd_idx, NULL);
                        pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
                                 __func__, indent, "",
                                 (unsigned long long)r_sector, dd_idx,
                                 (unsigned long long)sector);

                        rdev = conf->disks[dd_idx].rdev;
                        if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
                                      sector >= rdev->recovery_offset)) {
                                pr_debug("%s:%*s data member disk %d missing\n",
                                         __func__, indent, "", dd_idx);
                                update_parity = false;
                                break;
                        }

                        pr_debug("%s:%*s reading data member disk %s sector %llu\n",
                                 __func__, indent, "", bdevname(rdev->bdev, b),
                                 (unsigned long long)sector);
                        if (!sync_page_io(rdev, sector, block_size, page2,
                                        REQ_OP_READ, 0, false)) {
                                md_error(mddev, rdev);
                                pr_debug("%s:%*s read failed!\n", __func__,
                                         indent, "");
                                ret = -EIO;
                                goto out;
                        }

                        ppl_xor(block_size, page1, page2);

                        indent -= 2;
                }

                if (!update_parity)
                        continue;

                if (pp_size > 0) {
                        pr_debug("%s:%*s reading pp disk sector %llu\n",
                                 __func__, indent, "",
                                 (unsigned long long)(ppl_sector + i));
                        if (!sync_page_io(log->rdev,
                                        ppl_sector - log->rdev->data_offset + i,
                                        block_size, page2, REQ_OP_READ, 0,
                                        false)) {
                                pr_debug("%s:%*s read failed!\n", __func__,
                                         indent, "");
                                md_error(mddev, log->rdev);
                                ret = -EIO;
                                goto out;
                        }

                        ppl_xor(block_size, page1, page2);
                }

                /* map raid sector to parity disk */
                parity_sector = raid5_compute_sector(conf, r_sector_first + i,
                                0, &disk, &sh);
                BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
                parity_rdev = conf->disks[sh.pd_idx].rdev;

                BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
                pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
                         __func__, indent, "",
                         (unsigned long long)parity_sector,
                         bdevname(parity_rdev->bdev, b));
                if (!sync_page_io(parity_rdev, parity_sector, block_size,
                                page1, REQ_OP_WRITE, 0, false)) {
                        pr_debug("%s:%*s parity write error!\n", __func__,
                                 indent, "");
                        md_error(mddev, parity_rdev);
                        ret = -EIO;
                        goto out;
                }
        }
out:
        if (page1)
                __free_page(page1);
        if (page2)
                __free_page(page2);
        return ret;
}

static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
                       sector_t offset)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct md_rdev *rdev = log->rdev;
        struct mddev *mddev = rdev->mddev;
        sector_t ppl_sector = rdev->ppl.sector + offset +
                              (PPL_HEADER_SIZE >> 9);
        struct page *page;
        int i;
        int ret = 0;

        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        /* iterate through all PPL entries saved */
        for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
                struct ppl_header_entry *e = &pplhdr->entries[i];
                u32 pp_size = le32_to_cpu(e->pp_size);
                sector_t sector = ppl_sector;
                int ppl_entry_sectors = pp_size >> 9;
                u32 crc, crc_stored;

                pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
                         __func__, rdev->raid_disk, i,
                         (unsigned long long)ppl_sector, pp_size);

                crc = ~0;
                crc_stored = le32_to_cpu(e->checksum);

                /* read partial parity for this entry and calculate its checksum */
                while (pp_size) {
                        int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;

                        if (!sync_page_io(rdev, sector - rdev->data_offset,
                                        s, page, REQ_OP_READ, 0, false)) {
                                md_error(mddev, rdev);
                                ret = -EIO;
                                goto out;
                        }

                        crc = crc32c_le(crc, page_address(page), s);

                        pp_size -= s;
                        sector += s >> 9;
                }

                crc = ~crc;

                if (crc != crc_stored) {
                        /*
                         * Don't recover this entry if the checksum does not
                         * match, but keep going and try to recover other
                         * entries.
                         */
                        pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
                                 __func__, crc_stored, crc);
                        ppl_conf->mismatch_count++;
                } else {
                        ret = ppl_recover_entry(log, e, ppl_sector);
                        if (ret)
                                goto out;
                        ppl_conf->recovered_entries++;
                }

                ppl_sector += ppl_entry_sectors;
        }

        /* flush the disk cache after recovery if necessary */
        ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
out:
        __free_page(page);
        return ret;
}

static int ppl_write_empty_header(struct ppl_log *log)
{
        struct page *page;
        struct ppl_header *pplhdr;
        struct md_rdev *rdev = log->rdev;
        int ret = 0;

        pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
                 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);

        page = alloc_page(GFP_NOIO | __GFP_ZERO);
        if (!page)
                return -ENOMEM;

        pplhdr = page_address(page);
        /* zero out PPL space to avoid collision with old PPLs */
        blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
                            log->rdev->ppl.size, GFP_NOIO, 0);
        memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
        pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
        pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));

        if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
                          PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
                          REQ_FUA, 0, false)) {
                md_error(rdev->mddev, rdev);
                ret = -EIO;
        }

        __free_page(page);
        return ret;
}

static int ppl_load_distributed(struct ppl_log *log)
{
        struct ppl_conf *ppl_conf = log->ppl_conf;
        struct md_rdev *rdev = log->rdev;
        struct mddev *mddev = rdev->mddev;
        struct page *page, *page2, *tmp;
        struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
        u32 crc, crc_stored;
        u32 signature;
        int ret = 0, i;
        sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;

        pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
        /* read the PPL headers and find the most recent one */
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        page2 = alloc_page(GFP_KERNEL);
        if (!page2) {
                __free_page(page);
                return -ENOMEM;
        }

        /* search the PPL area for the latest PPL */
        while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
                if (!sync_page_io(rdev,
                                  rdev->ppl.sector - rdev->data_offset +
                                  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
                                  0, false)) {
                        md_error(mddev, rdev);
                        ret = -EIO;
                        /* if we can't read it, don't recover any PPL */
                        pplhdr = NULL;
                        break;
                }
                pplhdr = page_address(page);

                /* check header validity */
                crc_stored = le32_to_cpu(pplhdr->checksum);
                pplhdr->checksum = 0;
                crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);

                if (crc_stored != crc) {
                        pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
                                 __func__, crc_stored, crc,
                                 (unsigned long long)pplhdr_offset);
                        pplhdr = prev_pplhdr;
                        pplhdr_offset = prev_pplhdr_offset;
                        break;
                }

                signature = le32_to_cpu(pplhdr->signature);

                if (mddev->external) {
                        /*
                         * For external metadata the header signature is set and
                         * validated in userspace.
                         */
                        ppl_conf->signature = signature;
                } else if (ppl_conf->signature != signature) {
                        pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
                                 __func__, signature, ppl_conf->signature,
                                 (unsigned long long)pplhdr_offset);
                        pplhdr = prev_pplhdr;
                        pplhdr_offset = prev_pplhdr_offset;
                        break;
                }

                if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
                    le64_to_cpu(pplhdr->generation)) {
                        /* previous was newest */
                        pplhdr = prev_pplhdr;
                        pplhdr_offset = prev_pplhdr_offset;
                        break;
                }

                prev_pplhdr_offset = pplhdr_offset;
                prev_pplhdr = pplhdr;

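                /*
                 * Swap the pages so the header that was just validated stays
                 * around as prev_pplhdr while the next candidate header is
                 * read into the other page.
                 */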
                tmp = page;
                page = page2;
                page2 = tmp;

                /* calculate next potential ppl offset */
                for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
                        pplhdr_offset +=
                            le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
                pplhdr_offset += PPL_HEADER_SIZE >> 9;
        }

        /* no valid ppl found */
        if (!pplhdr)
                ppl_conf->mismatch_count++;
        else
                pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
                    __func__, (unsigned long long)pplhdr_offset,
                    le64_to_cpu(pplhdr->generation));

        /* attempt to recover from log if we are starting a dirty array */
        if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
                ret = ppl_recover(log, pplhdr, pplhdr_offset);

        /* write empty header if we are starting the array */
        if (!ret && !mddev->pers)
                ret = ppl_write_empty_header(log);

        __free_page(page);
        __free_page(page2);

        pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
                 __func__, ret, ppl_conf->mismatch_count,
                 ppl_conf->recovered_entries);
        return ret;
}

static int ppl_load(struct ppl_conf *ppl_conf)
{
        int ret = 0;
        u32 signature = 0;
        bool signature_set = false;
        int i;

        for (i = 0; i < ppl_conf->count; i++) {
                struct ppl_log *log = &ppl_conf->child_logs[i];

                /* skip missing drive */
                if (!log->rdev)
                        continue;

                ret = ppl_load_distributed(log);
                if (ret)
                        break;

                /*
                 * For external metadata we can't check if the signature is
                 * correct on a single drive, but we can check if it is the same
                 * on all drives.
                 */
                if (ppl_conf->mddev->external) {
                        if (!signature_set) {
                                signature = ppl_conf->signature;
                                signature_set = true;
                        } else if (signature != ppl_conf->signature) {
                                pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
                                        mdname(ppl_conf->mddev));
                                ret = -EINVAL;
                                break;
                        }
                }
        }

        pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
                 __func__, ret, ppl_conf->mismatch_count,
                 ppl_conf->recovered_entries);
        return ret;
}

static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
        clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
        clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

        kfree(ppl_conf->child_logs);

        bioset_exit(&ppl_conf->bs);
        bioset_exit(&ppl_conf->flush_bs);
        mempool_exit(&ppl_conf->io_pool);
        kmem_cache_destroy(ppl_conf->io_kc);

        kfree(ppl_conf);
}

void ppl_exit_log(struct r5conf *conf)
{
        struct ppl_conf *ppl_conf = conf->log_private;

        if (ppl_conf) {
                __ppl_exit_log(ppl_conf);
                conf->log_private = NULL;
        }
}

static int ppl_validate_rdev(struct md_rdev *rdev)
{
        char b[BDEVNAME_SIZE];
        int ppl_data_sectors;
        int ppl_size_new;

        /*
         * The configured PPL size must be enough to store
         * the header and (at the very least) partial parity
         * for one stripe. Round it down to ensure the data
         * space is cleanly divisible by stripe size.
         */
        ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);

        if (ppl_data_sectors > 0)
                ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS);

        if (ppl_data_sectors <= 0) {
                pr_warn("md/raid:%s: PPL space too small on %s\n",
                        mdname(rdev->mddev), bdevname(rdev->bdev, b));
                return -ENOSPC;
        }

        ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);

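        /* the PPL space must not overlap the rdev's data area */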
        if ((rdev->ppl.sector < rdev->data_offset &&
             rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
            (rdev->ppl.sector >= rdev->data_offset &&
             rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
                pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
                        mdname(rdev->mddev), bdevname(rdev->bdev, b));
                return -EINVAL;
        }

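        /*
         * With native md metadata, rdev->ppl.offset is relative to the
         * superblock, so the PPL must not overlap it from either side.
         */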
        if (!rdev->mddev->external &&
            ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
             (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
                pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
                        mdname(rdev->mddev), bdevname(rdev->bdev, b));
                return -EINVAL;
        }

        rdev->ppl.size = ppl_size_new;

        return 0;
}

static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
        struct request_queue *q;

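        /*
         * Use multiple smaller PPLs written in sequence only if the reserved
         * space fits at least two of them; otherwise a single PPL is
         * rewritten in place at rdev->ppl.sector.
         */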
        if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
                                      PPL_HEADER_SIZE) * 2) {
                log->use_multippl = true;
                set_bit(MD_HAS_MULTIPLE_PPLS,
                        &log->ppl_conf->mddev->flags);
                log->entry_space = PPL_SPACE_SIZE;
        } else {
                log->use_multippl = false;
                log->entry_space = (log->rdev->ppl.size << 9) -
                                   PPL_HEADER_SIZE;
        }
        log->next_io_sector = rdev->ppl.sector;

        q = bdev_get_queue(rdev->bdev);
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                log->wb_cache_on = true;
}

int ppl_init_log(struct r5conf *conf)
{
        struct ppl_conf *ppl_conf;
        struct mddev *mddev = conf->mddev;
        int ret = 0;
        int max_disks;
        int i;

        pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
                 mdname(conf->mddev));

        if (PAGE_SIZE != 4096)
                return -EINVAL;

        if (mddev->level != 5) {
                pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
                        mdname(mddev), mddev->level);
                return -EINVAL;
        }

        if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
                pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
                        mdname(mddev));
                return -EINVAL;
        }

        if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
                pr_warn("md/raid:%s PPL is not compatible with journal\n",
                        mdname(mddev));
                return -EINVAL;
        }

        max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) *
                BITS_PER_BYTE;
        if (conf->raid_disks > max_disks) {
                pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
                        mdname(mddev), max_disks);
                return -EINVAL;
        }

        ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
        if (!ppl_conf)
                return -ENOMEM;

        ppl_conf->mddev = mddev;

        ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
        if (!ppl_conf->io_kc) {
                ret = -ENOMEM;
                goto err;
        }

        ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
                           ppl_io_pool_free, ppl_conf->io_kc);
        if (ret)
                goto err;

        ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
        if (ret)
                goto err;

        ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
        if (ret)
                goto err;

        ppl_conf->count = conf->raid_disks;
        ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
                                       GFP_KERNEL);
        if (!ppl_conf->child_logs) {
                ret = -ENOMEM;
                goto err;
        }

        atomic64_set(&ppl_conf->seq, 0);
        INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
        spin_lock_init(&ppl_conf->no_mem_stripes_lock);
        ppl_conf->write_hint = RWF_WRITE_LIFE_NOT_SET;

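        /*
         * With internal metadata the signature is derived from the array
         * uuid here; with external metadata it is taken from the PPL headers
         * when they are loaded in ppl_load_distributed().
         */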
        if (!mddev->external) {
                ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
                ppl_conf->block_size = 512;
        } else {
                ppl_conf->block_size = queue_logical_block_size(mddev->queue);
        }

        for (i = 0; i < ppl_conf->count; i++) {
                struct ppl_log *log = &ppl_conf->child_logs[i];
                struct md_rdev *rdev = conf->disks[i].rdev;

                mutex_init(&log->io_mutex);
                spin_lock_init(&log->io_list_lock);
                INIT_LIST_HEAD(&log->io_list);

                log->ppl_conf = ppl_conf;
                log->rdev = rdev;

                if (rdev) {
                        ret = ppl_validate_rdev(rdev);
                        if (ret)
                                goto err;

                        ppl_init_child_log(log, rdev);
                }
        }

        /* load and possibly recover the logs from the member disks */
        ret = ppl_load(ppl_conf);

        if (ret) {
                goto err;
        } else if (!mddev->pers && mddev->recovery_cp == 0 &&
                   ppl_conf->recovered_entries > 0 &&
                   ppl_conf->mismatch_count == 0) {
                /*
                 * If we are starting a dirty array and the recovery succeeds
                 * without any issues, set the array as clean.
                 */
                mddev->recovery_cp = MaxSector;
                set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
        } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
                /* no mismatch allowed when enabling PPL for a running array */
                ret = -EINVAL;
                goto err;
        }

        conf->log_private = ppl_conf;
        set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);

        return 0;
err:
        __ppl_exit_log(ppl_conf);
        return ret;
}

int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
{
        struct ppl_conf *ppl_conf = conf->log_private;
        struct ppl_log *log;
        int ret = 0;
        char b[BDEVNAME_SIZE];

        if (!rdev)
                return -EINVAL;

        pr_debug("%s: disk: %d operation: %s dev: %s\n",
                 __func__, rdev->raid_disk, add ? "add" : "remove",
                 bdevname(rdev->bdev, b));

        if (rdev->raid_disk < 0)
                return 0;

        if (rdev->raid_disk >= ppl_conf->count)
                return -ENODEV;

        log = &ppl_conf->child_logs[rdev->raid_disk];

        mutex_lock(&log->io_mutex);
        if (add) {
                ret = ppl_validate_rdev(rdev);
                if (!ret) {
                        log->rdev = rdev;
                        ret = ppl_write_empty_header(log);
                        ppl_init_child_log(log, rdev);
                }
        } else {
                log->rdev = NULL;
        }
        mutex_unlock(&log->io_mutex);

        return ret;
}

static ssize_t
ppl_write_hint_show(struct mddev *mddev, char *buf)
{
        size_t ret = 0;
        struct r5conf *conf;
        struct ppl_conf *ppl_conf = NULL;

        spin_lock(&mddev->lock);
        conf = mddev->private;
        if (conf && raid5_has_ppl(conf))
                ppl_conf = conf->log_private;
        ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
        spin_unlock(&mddev->lock);

        return ret;
}

static ssize_t
ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
{
        struct r5conf *conf;
        struct ppl_conf *ppl_conf;
        int err = 0;
        unsigned short new;

        if (len >= PAGE_SIZE)
                return -EINVAL;
        if (kstrtou16(page, 10, &new))
                return -EINVAL;

        err = mddev_lock(mddev);
        if (err)
                return err;

        conf = mddev->private;
        if (!conf) {
                err = -ENODEV;
        } else if (raid5_has_ppl(conf)) {
                ppl_conf = conf->log_private;
                if (!ppl_conf)
                        err = -EINVAL;
                else
                        ppl_conf->write_hint = new;
        } else {
                err = -EINVAL;
        }

        mddev_unlock(mddev);

        return err ?: len;
}

struct md_sysfs_entry
ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
                        ppl_write_hint_show,
                        ppl_write_hint_store);