linux/drivers/md/raid5.c
   1/*
   2 * raid5.c : Multiple Devices driver for Linux
   3 *         Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   4 *         Copyright (C) 1999, 2000 Ingo Molnar
   5 *         Copyright (C) 2002, 2003 H. Peter Anvin
   6 *
   7 * RAID-4/5/6 management functions.
   8 * Thanks to Penguin Computing for making the RAID-6 development possible
   9 * by donating a test server!
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * (for example /usr/src/linux/COPYING); if not, write to the Free
  18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20
  21/*
  22 * BITMAP UNPLUGGING:
  23 *
  24 * The sequencing for updating the bitmap reliably is a little
  25 * subtle (and I got it wrong the first time) so it deserves some
  26 * explanation.
  27 *
  28 * We group bitmap updates into batches.  Each batch has a number.
  29 * We may write out several batches at once, but that isn't very important.
  30 * conf->seq_write is the number of the last batch successfully written.
  31 * conf->seq_flush is the number of the last batch that was closed to
  32 *    new additions.
  33 * When we discover that we will need to write to any block in a stripe
  34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
  35 * the number of the batch it will be in. This is seq_flush+1.
  36 * When we are ready to do a write, if that batch hasn't been written yet,
  37 *   we plug the array and queue the stripe for later.
   38 * When an unplug happens, we increment seq_flush, thus closing the current
   39 *   batch.
   40 * When we notice that seq_flush > seq_write, we write out all pending updates
   41 * to the bitmap, and advance seq_write to where seq_flush was.
  42 * This may occasionally write a bit out twice, but is sure never to
  43 * miss any bits.
  44 */
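
     /*
      * A minimal sketch of the bookkeeping described above (illustration
      * only, not compiled; the corresponding driver code lives in
      * add_stripe_bio(), __release_stripe() and the raid5d thread):
      */
     #if 0
             /* add_stripe_bio(): remember which batch this write belongs to */
             sh->bm_seq = conf->seq_flush + 1;

             /* __release_stripe(): if that batch has not been written out yet,
              * park the stripe on bitmap_list instead of handling it now.
              */
             if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                 sh->bm_seq - conf->seq_write > 0)
                     list_add_tail(&sh->lru, &conf->bitmap_list);

             /* once seq_flush has moved past seq_write, the pending bitmap
              * updates are written out and the parked stripes are released.
              */
             if (conf->seq_flush != conf->seq_write)
                     bitmap_unplug(conf->mddev->bitmap);
     #endif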
  45
  46#include <linux/blkdev.h>
  47#include <linux/kthread.h>
  48#include <linux/raid/pq.h>
  49#include <linux/async_tx.h>
  50#include <linux/async.h>
  51#include <linux/seq_file.h>
  52#include <linux/cpu.h>
  53#include <linux/slab.h>
  54#include <linux/ratelimit.h>
  55#include "md.h"
  56#include "raid5.h"
  57#include "raid0.h"
  58#include "bitmap.h"
  59
  60/*
  61 * Stripe cache
  62 */
  63
  64#define NR_STRIPES              256
  65#define STRIPE_SIZE             PAGE_SIZE
  66#define STRIPE_SHIFT            (PAGE_SHIFT - 9)
  67#define STRIPE_SECTORS          (STRIPE_SIZE>>9)
  68#define IO_THRESHOLD            1
  69#define BYPASS_THRESHOLD        1
  70#define NR_HASH                 (PAGE_SIZE / sizeof(struct hlist_head))
  71#define HASH_MASK               (NR_HASH - 1)
  72
  73#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
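
     /*
      * Example of the geometry above, assuming a 64-bit build with 4K pages
      * (PAGE_SHIFT == 12, sizeof(struct hlist_head) == 8): STRIPE_SIZE is one
      * page, STRIPE_SHIFT == 3, STRIPE_SECTORS == 8 and NR_HASH == 512, so a
      * stripe starting at sector 0x1230 hashes to bucket (0x1230 >> 3) & 511,
      * i.e. bucket 0x46.  Sketch only, not compiled:
      */
     #if 0
             struct hlist_head *hp = stripe_hash(conf, 0x1230);
     #endif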
  74
   75/* bios attached to a stripe+device for I/O are linked together in bi_sector
   76 * order without overlap.  There may be several bios per stripe+device, and
  77 * a bio could span several devices.
  78 * When walking this list for a particular stripe+device, we must never proceed
  79 * beyond a bio that extends past this device, as the next bio might no longer
  80 * be valid.
  81 * This macro is used to determine the 'next' bio in the list, given the sector
  82 * of the current stripe+device
  83 */
  84#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
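
     /*
      * Typical walk over the bios queued against one stripe+device, as done
      * by the biofill/biodrain paths further down (sketch only, not compiled;
      * 'dev' is a struct r5dev):
      */
     #if 0
             struct bio *rbi = dev->toread;

             while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
                     /* handle the part of 'rbi' that overlaps this device */
                     rbi = r5_next_bio(rbi, dev->sector);
             }
     #endif
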
  85/*
  86 * The following can be used to debug the driver
  87 */
  88#define RAID5_PARANOIA  1
  89#if RAID5_PARANOIA && defined(CONFIG_SMP)
  90# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
  91#else
  92# define CHECK_DEVLOCK()
  93#endif
  94
  95#ifdef DEBUG
  96#define inline
  97#define __inline__
  98#endif
  99
 100/*
 101 * We maintain a biased count of active stripes in the bottom 16 bits of
 102 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 103 */
 104static inline int raid5_bi_phys_segments(struct bio *bio)
 105{
 106        return bio->bi_phys_segments & 0xffff;
 107}
 108
 109static inline int raid5_bi_hw_segments(struct bio *bio)
 110{
 111        return (bio->bi_phys_segments >> 16) & 0xffff;
 112}
 113
 114static inline int raid5_dec_bi_phys_segments(struct bio *bio)
 115{
 116        --bio->bi_phys_segments;
 117        return raid5_bi_phys_segments(bio);
 118}
 119
 120static inline int raid5_dec_bi_hw_segments(struct bio *bio)
 121{
 122        unsigned short val = raid5_bi_hw_segments(bio);
 123
 124        --val;
 125        bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
 126        return val;
 127}
 128
 129static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
 130{
 131        bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
 132}
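
     /*
      * Example of the packed-counter convention described above (sketch only,
      * not compiled): the low 16 bits count the stripes still referencing the
      * bio, the upper 16 bits carry the count of processed stripes.
      */
     #if 0
             bio->bi_phys_segments = 1;         /* one active stripe, none processed */
             raid5_set_bi_hw_segments(bio, 1);  /* upper half: one stripe processed */
             if (raid5_dec_bi_phys_segments(bio) == 0)
                     bio_endio(bio, 0);         /* no active stripes left: complete the bio */
     #endif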
 133
 134/* Find first data disk in a raid6 stripe */
 135static inline int raid6_d0(struct stripe_head *sh)
 136{
 137        if (sh->ddf_layout)
  138                /* ddf always starts from the first device */
 139                return 0;
 140        /* md starts just after Q block */
 141        if (sh->qd_idx == sh->disks - 1)
 142                return 0;
 143        else
 144                return sh->qd_idx + 1;
 145}
 146static inline int raid6_next_disk(int disk, int raid_disks)
 147{
 148        disk++;
 149        return (disk < raid_disks) ? disk : 0;
 150}
 151
  152/* When walking through the disks in a raid6 stripe, starting at raid6_d0,
  153 * we need to map each disk to a 'slot', where the data disks are slot
  154 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
  155 * is raid_disks-1.  This helper does that mapping.
 156 */
 157static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
 158                             int *count, int syndrome_disks)
 159{
 160        int slot = *count;
 161
 162        if (sh->ddf_layout)
 163                (*count)++;
 164        if (idx == sh->pd_idx)
 165                return syndrome_disks;
 166        if (idx == sh->qd_idx)
 167                return syndrome_disks + 1;
 168        if (!sh->ddf_layout)
 169                (*count)++;
 170        return slot;
 171}
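
     /*
      * Worked example (md layout, 5 devices, pd_idx == 0, qd_idx == 1, so
      * raid6_d0() == 2 and syndrome_disks == 3): walking from d0 with the
      * loop below maps devices 2,3,4 to data slots 0,1,2, device 0 (P) to
      * slot 3 and device 1 (Q) to slot 4.  This is the pattern used by
      * set_syndrome_sources() and ops_run_compute6_2() below.  Sketch only,
      * not compiled; srcs[] is a caller-provided array of page pointers:
      */
     #if 0
             int count = 0, d0_idx = raid6_d0(sh), i = d0_idx;

             do {
                     int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

                     srcs[slot] = sh->dev[i].page;
                     i = raid6_next_disk(i, sh->disks);
             } while (i != d0_idx);
     #endif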
 172
 173static void return_io(struct bio *return_bi)
 174{
 175        struct bio *bi = return_bi;
 176        while (bi) {
 177
 178                return_bi = bi->bi_next;
 179                bi->bi_next = NULL;
 180                bi->bi_size = 0;
 181                bio_endio(bi, 0);
 182                bi = return_bi;
 183        }
 184}
 185
 186static void print_raid5_conf (raid5_conf_t *conf);
 187
 188static int stripe_operations_active(struct stripe_head *sh)
 189{
 190        return sh->check_state || sh->reconstruct_state ||
 191               test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
 192               test_bit(STRIPE_COMPUTE_RUN, &sh->state);
 193}
 194
 195static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 196{
 197        if (atomic_dec_and_test(&sh->count)) {
 198                BUG_ON(!list_empty(&sh->lru));
 199                BUG_ON(atomic_read(&conf->active_stripes)==0);
 200                if (test_bit(STRIPE_HANDLE, &sh->state)) {
 201                        if (test_bit(STRIPE_DELAYED, &sh->state))
 202                                list_add_tail(&sh->lru, &conf->delayed_list);
 203                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 204                                   sh->bm_seq - conf->seq_write > 0)
 205                                list_add_tail(&sh->lru, &conf->bitmap_list);
 206                        else {
 207                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
 208                                list_add_tail(&sh->lru, &conf->handle_list);
 209                        }
 210                        md_wakeup_thread(conf->mddev->thread);
 211                } else {
 212                        BUG_ON(stripe_operations_active(sh));
 213                        if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
 214                                atomic_dec(&conf->preread_active_stripes);
 215                                if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
 216                                        md_wakeup_thread(conf->mddev->thread);
 217                        }
 218                        atomic_dec(&conf->active_stripes);
 219                        if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
 220                                list_add_tail(&sh->lru, &conf->inactive_list);
 221                                wake_up(&conf->wait_for_stripe);
 222                                if (conf->retry_read_aligned)
 223                                        md_wakeup_thread(conf->mddev->thread);
 224                        }
 225                }
 226        }
 227}
 228
 229static void release_stripe(struct stripe_head *sh)
 230{
 231        raid5_conf_t *conf = sh->raid_conf;
 232        unsigned long flags;
 233
 234        spin_lock_irqsave(&conf->device_lock, flags);
 235        __release_stripe(conf, sh);
 236        spin_unlock_irqrestore(&conf->device_lock, flags);
 237}
 238
 239static inline void remove_hash(struct stripe_head *sh)
 240{
 241        pr_debug("remove_hash(), stripe %llu\n",
 242                (unsigned long long)sh->sector);
 243
 244        hlist_del_init(&sh->hash);
 245}
 246
 247static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 248{
 249        struct hlist_head *hp = stripe_hash(conf, sh->sector);
 250
 251        pr_debug("insert_hash(), stripe %llu\n",
 252                (unsigned long long)sh->sector);
 253
 254        CHECK_DEVLOCK();
 255        hlist_add_head(&sh->hash, hp);
 256}
 257
 258
 259/* find an idle stripe, make sure it is unhashed, and return it. */
 260static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
 261{
 262        struct stripe_head *sh = NULL;
 263        struct list_head *first;
 264
 265        CHECK_DEVLOCK();
 266        if (list_empty(&conf->inactive_list))
 267                goto out;
 268        first = conf->inactive_list.next;
 269        sh = list_entry(first, struct stripe_head, lru);
 270        list_del_init(first);
 271        remove_hash(sh);
 272        atomic_inc(&conf->active_stripes);
 273out:
 274        return sh;
 275}
 276
 277static void shrink_buffers(struct stripe_head *sh)
 278{
 279        struct page *p;
 280        int i;
 281        int num = sh->raid_conf->pool_size;
 282
 283        for (i = 0; i < num ; i++) {
 284                p = sh->dev[i].page;
 285                if (!p)
 286                        continue;
 287                sh->dev[i].page = NULL;
 288                put_page(p);
 289        }
 290}
 291
 292static int grow_buffers(struct stripe_head *sh)
 293{
 294        int i;
 295        int num = sh->raid_conf->pool_size;
 296
 297        for (i = 0; i < num; i++) {
 298                struct page *page;
 299
 300                if (!(page = alloc_page(GFP_KERNEL))) {
 301                        return 1;
 302                }
 303                sh->dev[i].page = page;
 304        }
 305        return 0;
 306}
 307
 308static void raid5_build_block(struct stripe_head *sh, int i, int previous);
 309static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
 310                            struct stripe_head *sh);
 311
 312static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 313{
 314        raid5_conf_t *conf = sh->raid_conf;
 315        int i;
 316
 317        BUG_ON(atomic_read(&sh->count) != 0);
 318        BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
 319        BUG_ON(stripe_operations_active(sh));
 320
 321        CHECK_DEVLOCK();
 322        pr_debug("init_stripe called, stripe %llu\n",
 323                (unsigned long long)sh->sector);
 324
 325        remove_hash(sh);
 326
 327        sh->generation = conf->generation - previous;
 328        sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 329        sh->sector = sector;
 330        stripe_set_idx(sector, conf, previous, sh);
 331        sh->state = 0;
 332
 333
 334        for (i = sh->disks; i--; ) {
 335                struct r5dev *dev = &sh->dev[i];
 336
 337                if (dev->toread || dev->read || dev->towrite || dev->written ||
 338                    test_bit(R5_LOCKED, &dev->flags)) {
 339                        printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
 340                               (unsigned long long)sh->sector, i, dev->toread,
 341                               dev->read, dev->towrite, dev->written,
 342                               test_bit(R5_LOCKED, &dev->flags));
 343                        WARN_ON(1);
 344                }
 345                dev->flags = 0;
 346                raid5_build_block(sh, i, previous);
 347        }
 348        insert_hash(conf, sh);
 349}
 350
 351static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
 352                                         short generation)
 353{
 354        struct stripe_head *sh;
 355        struct hlist_node *hn;
 356
 357        CHECK_DEVLOCK();
 358        pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
 359        hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
 360                if (sh->sector == sector && sh->generation == generation)
 361                        return sh;
 362        pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
 363        return NULL;
 364}
 365
 366/*
 367 * Need to check if array has failed when deciding whether to:
 368 *  - start an array
 369 *  - remove non-faulty devices
 370 *  - add a spare
 371 *  - allow a reshape
 372 * This determination is simple when no reshape is happening.
 373 * However if there is a reshape, we need to carefully check
 374 * both the before and after sections.
 375 * This is because some failed devices may only affect one
 376 * of the two sections, and some non-in_sync devices may
  377 * be in_sync in the section most affected by failed devices.
 378 */
 379static int has_failed(raid5_conf_t *conf)
 380{
 381        int degraded;
 382        int i;
 383        if (conf->mddev->reshape_position == MaxSector)
 384                return conf->mddev->degraded > conf->max_degraded;
 385
 386        rcu_read_lock();
 387        degraded = 0;
 388        for (i = 0; i < conf->previous_raid_disks; i++) {
 389                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 390                if (!rdev || test_bit(Faulty, &rdev->flags))
 391                        degraded++;
 392                else if (test_bit(In_sync, &rdev->flags))
 393                        ;
 394                else
 395                        /* not in-sync or faulty.
 396                         * If the reshape increases the number of devices,
 397                         * this is being recovered by the reshape, so
 398                         * this 'previous' section is not in_sync.
 399                         * If the number of devices is being reduced however,
 400                         * the device can only be part of the array if
 401                         * we are reverting a reshape, so this section will
 402                         * be in-sync.
 403                         */
 404                        if (conf->raid_disks >= conf->previous_raid_disks)
 405                                degraded++;
 406        }
 407        rcu_read_unlock();
 408        if (degraded > conf->max_degraded)
 409                return 1;
 410        rcu_read_lock();
 411        degraded = 0;
 412        for (i = 0; i < conf->raid_disks; i++) {
 413                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 414                if (!rdev || test_bit(Faulty, &rdev->flags))
 415                        degraded++;
 416                else if (test_bit(In_sync, &rdev->flags))
 417                        ;
 418                else
 419                        /* not in-sync or faulty.
 420                         * If reshape increases the number of devices, this
 421                         * section has already been recovered, else it
 422                         * almost certainly hasn't.
 423                         */
 424                        if (conf->raid_disks <= conf->previous_raid_disks)
 425                                degraded++;
 426        }
 427        rcu_read_unlock();
 428        if (degraded > conf->max_degraded)
 429                return 1;
 430        return 0;
 431}
 432
 433static struct stripe_head *
 434get_active_stripe(raid5_conf_t *conf, sector_t sector,
 435                  int previous, int noblock, int noquiesce)
 436{
 437        struct stripe_head *sh;
 438
 439        pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 440
 441        spin_lock_irq(&conf->device_lock);
 442
 443        do {
 444                wait_event_lock_irq(conf->wait_for_stripe,
 445                                    conf->quiesce == 0 || noquiesce,
 446                                    conf->device_lock, /* nothing */);
 447                sh = __find_stripe(conf, sector, conf->generation - previous);
 448                if (!sh) {
 449                        if (!conf->inactive_blocked)
 450                                sh = get_free_stripe(conf);
 451                        if (noblock && sh == NULL)
 452                                break;
 453                        if (!sh) {
 454                                conf->inactive_blocked = 1;
 455                                wait_event_lock_irq(conf->wait_for_stripe,
 456                                                    !list_empty(&conf->inactive_list) &&
 457                                                    (atomic_read(&conf->active_stripes)
 458                                                     < (conf->max_nr_stripes *3/4)
 459                                                     || !conf->inactive_blocked),
 460                                                    conf->device_lock,
 461                                                    );
 462                                conf->inactive_blocked = 0;
 463                        } else
 464                                init_stripe(sh, sector, previous);
 465                } else {
 466                        if (atomic_read(&sh->count)) {
 467                                BUG_ON(!list_empty(&sh->lru)
 468                                    && !test_bit(STRIPE_EXPANDING, &sh->state));
 469                        } else {
 470                                if (!test_bit(STRIPE_HANDLE, &sh->state))
 471                                        atomic_inc(&conf->active_stripes);
 472                                if (list_empty(&sh->lru) &&
 473                                    !test_bit(STRIPE_EXPANDING, &sh->state))
 474                                        BUG();
 475                                list_del_init(&sh->lru);
 476                        }
 477                }
 478        } while (sh == NULL);
 479
 480        if (sh)
 481                atomic_inc(&sh->count);
 482
 483        spin_unlock_irq(&conf->device_lock);
 484        return sh;
 485}
 486
 487static void
 488raid5_end_read_request(struct bio *bi, int error);
 489static void
 490raid5_end_write_request(struct bio *bi, int error);
 491
 492static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 493{
 494        raid5_conf_t *conf = sh->raid_conf;
 495        int i, disks = sh->disks;
 496
 497        might_sleep();
 498
 499        for (i = disks; i--; ) {
 500                int rw;
 501                struct bio *bi;
 502                mdk_rdev_t *rdev;
 503                if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 504                        if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
 505                                rw = WRITE_FUA;
 506                        else
 507                                rw = WRITE;
 508                } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
 509                        rw = READ;
 510                else
 511                        continue;
 512
 513                bi = &sh->dev[i].req;
 514
 515                bi->bi_rw = rw;
 516                if (rw & WRITE)
 517                        bi->bi_end_io = raid5_end_write_request;
 518                else
 519                        bi->bi_end_io = raid5_end_read_request;
 520
 521                rcu_read_lock();
 522                rdev = rcu_dereference(conf->disks[i].rdev);
 523                if (rdev && test_bit(Faulty, &rdev->flags))
 524                        rdev = NULL;
 525                if (rdev)
 526                        atomic_inc(&rdev->nr_pending);
 527                rcu_read_unlock();
 528
  529                /* We have already checked bad blocks for reads.  Now we
 530                 * need to check for writes.
 531                 */
 532                while ((rw & WRITE) && rdev &&
 533                       test_bit(WriteErrorSeen, &rdev->flags)) {
 534                        sector_t first_bad;
 535                        int bad_sectors;
 536                        int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
 537                                              &first_bad, &bad_sectors);
 538                        if (!bad)
 539                                break;
 540
 541                        if (bad < 0) {
 542                                set_bit(BlockedBadBlocks, &rdev->flags);
 543                                if (!conf->mddev->external &&
 544                                    conf->mddev->flags) {
 545                                        /* It is very unlikely, but we might
 546                                         * still need to write out the
 547                                         * bad block log - better give it
  548                                         * a chance. */
 549                                        md_check_recovery(conf->mddev);
 550                                }
 551                                md_wait_for_blocked_rdev(rdev, conf->mddev);
 552                        } else {
 553                                /* Acknowledged bad block - skip the write */
 554                                rdev_dec_pending(rdev, conf->mddev);
 555                                rdev = NULL;
 556                        }
 557                }
 558
 559                if (rdev) {
 560                        if (s->syncing || s->expanding || s->expanded)
 561                                md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 562
 563                        set_bit(STRIPE_IO_STARTED, &sh->state);
 564
 565                        bi->bi_bdev = rdev->bdev;
 566                        pr_debug("%s: for %llu schedule op %ld on disc %d\n",
 567                                __func__, (unsigned long long)sh->sector,
 568                                bi->bi_rw, i);
 569                        atomic_inc(&sh->count);
 570                        bi->bi_sector = sh->sector + rdev->data_offset;
 571                        bi->bi_flags = 1 << BIO_UPTODATE;
 572                        bi->bi_vcnt = 1;
 573                        bi->bi_max_vecs = 1;
 574                        bi->bi_idx = 0;
 575                        bi->bi_io_vec = &sh->dev[i].vec;
 576                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 577                        bi->bi_io_vec[0].bv_offset = 0;
 578                        bi->bi_size = STRIPE_SIZE;
 579                        bi->bi_next = NULL;
 580                        generic_make_request(bi);
 581                } else {
 582                        if (rw & WRITE)
 583                                set_bit(STRIPE_DEGRADED, &sh->state);
 584                        pr_debug("skip op %ld on disc %d for sector %llu\n",
 585                                bi->bi_rw, i, (unsigned long long)sh->sector);
 586                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
 587                        set_bit(STRIPE_HANDLE, &sh->state);
 588                }
 589        }
 590}
 591
 592static struct dma_async_tx_descriptor *
 593async_copy_data(int frombio, struct bio *bio, struct page *page,
 594        sector_t sector, struct dma_async_tx_descriptor *tx)
 595{
 596        struct bio_vec *bvl;
 597        struct page *bio_page;
 598        int i;
 599        int page_offset;
 600        struct async_submit_ctl submit;
 601        enum async_tx_flags flags = 0;
 602
 603        if (bio->bi_sector >= sector)
 604                page_offset = (signed)(bio->bi_sector - sector) * 512;
 605        else
 606                page_offset = (signed)(sector - bio->bi_sector) * -512;
 607
 608        if (frombio)
 609                flags |= ASYNC_TX_FENCE;
 610        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 611
 612        bio_for_each_segment(bvl, bio, i) {
 613                int len = bvl->bv_len;
 614                int clen;
 615                int b_offset = 0;
 616
 617                if (page_offset < 0) {
 618                        b_offset = -page_offset;
 619                        page_offset += b_offset;
 620                        len -= b_offset;
 621                }
 622
 623                if (len > 0 && page_offset + len > STRIPE_SIZE)
 624                        clen = STRIPE_SIZE - page_offset;
 625                else
 626                        clen = len;
 627
 628                if (clen > 0) {
 629                        b_offset += bvl->bv_offset;
 630                        bio_page = bvl->bv_page;
 631                        if (frombio)
 632                                tx = async_memcpy(page, bio_page, page_offset,
 633                                                  b_offset, clen, &submit);
 634                        else
 635                                tx = async_memcpy(bio_page, page, b_offset,
 636                                                  page_offset, clen, &submit);
 637                }
 638                /* chain the operations */
 639                submit.depend_tx = tx;
 640
 641                if (clen < len) /* hit end of page */
 642                        break;
 643                page_offset +=  len;
 644        }
 645
 646        return tx;
 647}
 648
 649static void ops_complete_biofill(void *stripe_head_ref)
 650{
 651        struct stripe_head *sh = stripe_head_ref;
 652        struct bio *return_bi = NULL;
 653        raid5_conf_t *conf = sh->raid_conf;
 654        int i;
 655
 656        pr_debug("%s: stripe %llu\n", __func__,
 657                (unsigned long long)sh->sector);
 658
 659        /* clear completed biofills */
 660        spin_lock_irq(&conf->device_lock);
 661        for (i = sh->disks; i--; ) {
 662                struct r5dev *dev = &sh->dev[i];
 663
 664                /* acknowledge completion of a biofill operation */
 665                /* and check if we need to reply to a read request,
 666                 * new R5_Wantfill requests are held off until
 667                 * !STRIPE_BIOFILL_RUN
 668                 */
 669                if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
 670                        struct bio *rbi, *rbi2;
 671
 672                        BUG_ON(!dev->read);
 673                        rbi = dev->read;
 674                        dev->read = NULL;
 675                        while (rbi && rbi->bi_sector <
 676                                dev->sector + STRIPE_SECTORS) {
 677                                rbi2 = r5_next_bio(rbi, dev->sector);
 678                                if (!raid5_dec_bi_phys_segments(rbi)) {
 679                                        rbi->bi_next = return_bi;
 680                                        return_bi = rbi;
 681                                }
 682                                rbi = rbi2;
 683                        }
 684                }
 685        }
 686        spin_unlock_irq(&conf->device_lock);
 687        clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 688
 689        return_io(return_bi);
 690
 691        set_bit(STRIPE_HANDLE, &sh->state);
 692        release_stripe(sh);
 693}
 694
 695static void ops_run_biofill(struct stripe_head *sh)
 696{
 697        struct dma_async_tx_descriptor *tx = NULL;
 698        raid5_conf_t *conf = sh->raid_conf;
 699        struct async_submit_ctl submit;
 700        int i;
 701
 702        pr_debug("%s: stripe %llu\n", __func__,
 703                (unsigned long long)sh->sector);
 704
 705        for (i = sh->disks; i--; ) {
 706                struct r5dev *dev = &sh->dev[i];
 707                if (test_bit(R5_Wantfill, &dev->flags)) {
 708                        struct bio *rbi;
 709                        spin_lock_irq(&conf->device_lock);
 710                        dev->read = rbi = dev->toread;
 711                        dev->toread = NULL;
 712                        spin_unlock_irq(&conf->device_lock);
 713                        while (rbi && rbi->bi_sector <
 714                                dev->sector + STRIPE_SECTORS) {
 715                                tx = async_copy_data(0, rbi, dev->page,
 716                                        dev->sector, tx);
 717                                rbi = r5_next_bio(rbi, dev->sector);
 718                        }
 719                }
 720        }
 721
 722        atomic_inc(&sh->count);
 723        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
 724        async_trigger_callback(&submit);
 725}
 726
 727static void mark_target_uptodate(struct stripe_head *sh, int target)
 728{
 729        struct r5dev *tgt;
 730
 731        if (target < 0)
 732                return;
 733
 734        tgt = &sh->dev[target];
 735        set_bit(R5_UPTODATE, &tgt->flags);
 736        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 737        clear_bit(R5_Wantcompute, &tgt->flags);
 738}
 739
 740static void ops_complete_compute(void *stripe_head_ref)
 741{
 742        struct stripe_head *sh = stripe_head_ref;
 743
 744        pr_debug("%s: stripe %llu\n", __func__,
 745                (unsigned long long)sh->sector);
 746
 747        /* mark the computed target(s) as uptodate */
 748        mark_target_uptodate(sh, sh->ops.target);
 749        mark_target_uptodate(sh, sh->ops.target2);
 750
 751        clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
 752        if (sh->check_state == check_state_compute_run)
 753                sh->check_state = check_state_compute_result;
 754        set_bit(STRIPE_HANDLE, &sh->state);
 755        release_stripe(sh);
 756}
 757
 758/* return a pointer to the address conversion region of the scribble buffer */
 759static addr_conv_t *to_addr_conv(struct stripe_head *sh,
 760                                 struct raid5_percpu *percpu)
 761{
 762        return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
 763}
 764
 765static struct dma_async_tx_descriptor *
 766ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
 767{
 768        int disks = sh->disks;
 769        struct page **xor_srcs = percpu->scribble;
 770        int target = sh->ops.target;
 771        struct r5dev *tgt = &sh->dev[target];
 772        struct page *xor_dest = tgt->page;
 773        int count = 0;
 774        struct dma_async_tx_descriptor *tx;
 775        struct async_submit_ctl submit;
 776        int i;
 777
 778        pr_debug("%s: stripe %llu block: %d\n",
 779                __func__, (unsigned long long)sh->sector, target);
 780        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 781
 782        for (i = disks; i--; )
 783                if (i != target)
 784                        xor_srcs[count++] = sh->dev[i].page;
 785
 786        atomic_inc(&sh->count);
 787
 788        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
 789                          ops_complete_compute, sh, to_addr_conv(sh, percpu));
 790        if (unlikely(count == 1))
 791                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
 792        else
 793                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 794
 795        return tx;
 796}
 797
 798/* set_syndrome_sources - populate source buffers for gen_syndrome
 799 * @srcs - (struct page *) array of size sh->disks
 800 * @sh - stripe_head to parse
 801 *
 802 * Populates srcs in proper layout order for the stripe and returns the
 803 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 804 * destination buffer is recorded in srcs[count] and the Q destination
  805 * is recorded in srcs[count+1].
 806 */
 807static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
 808{
 809        int disks = sh->disks;
 810        int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
 811        int d0_idx = raid6_d0(sh);
 812        int count;
 813        int i;
 814
 815        for (i = 0; i < disks; i++)
 816                srcs[i] = NULL;
 817
 818        count = 0;
 819        i = d0_idx;
 820        do {
 821                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
 822
 823                srcs[slot] = sh->dev[i].page;
 824                i = raid6_next_disk(i, disks);
 825        } while (i != d0_idx);
 826
 827        return syndrome_disks;
 828}
 829
 830static struct dma_async_tx_descriptor *
 831ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
 832{
 833        int disks = sh->disks;
 834        struct page **blocks = percpu->scribble;
 835        int target;
 836        int qd_idx = sh->qd_idx;
 837        struct dma_async_tx_descriptor *tx;
 838        struct async_submit_ctl submit;
 839        struct r5dev *tgt;
 840        struct page *dest;
 841        int i;
 842        int count;
 843
 844        if (sh->ops.target < 0)
 845                target = sh->ops.target2;
 846        else if (sh->ops.target2 < 0)
 847                target = sh->ops.target;
 848        else
 849                /* we should only have one valid target */
 850                BUG();
 851        BUG_ON(target < 0);
 852        pr_debug("%s: stripe %llu block: %d\n",
 853                __func__, (unsigned long long)sh->sector, target);
 854
 855        tgt = &sh->dev[target];
 856        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 857        dest = tgt->page;
 858
 859        atomic_inc(&sh->count);
 860
 861        if (target == qd_idx) {
 862                count = set_syndrome_sources(blocks, sh);
 863                blocks[count] = NULL; /* regenerating p is not necessary */
 864                BUG_ON(blocks[count+1] != dest); /* q should already be set */
 865                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 866                                  ops_complete_compute, sh,
 867                                  to_addr_conv(sh, percpu));
 868                tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
 869        } else {
 870                /* Compute any data- or p-drive using XOR */
 871                count = 0;
 872                for (i = disks; i-- ; ) {
 873                        if (i == target || i == qd_idx)
 874                                continue;
 875                        blocks[count++] = sh->dev[i].page;
 876                }
 877
 878                init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
 879                                  NULL, ops_complete_compute, sh,
 880                                  to_addr_conv(sh, percpu));
 881                tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
 882        }
 883
 884        return tx;
 885}
 886
 887static struct dma_async_tx_descriptor *
 888ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
 889{
 890        int i, count, disks = sh->disks;
 891        int syndrome_disks = sh->ddf_layout ? disks : disks-2;
 892        int d0_idx = raid6_d0(sh);
 893        int faila = -1, failb = -1;
 894        int target = sh->ops.target;
 895        int target2 = sh->ops.target2;
 896        struct r5dev *tgt = &sh->dev[target];
 897        struct r5dev *tgt2 = &sh->dev[target2];
 898        struct dma_async_tx_descriptor *tx;
 899        struct page **blocks = percpu->scribble;
 900        struct async_submit_ctl submit;
 901
 902        pr_debug("%s: stripe %llu block1: %d block2: %d\n",
 903                 __func__, (unsigned long long)sh->sector, target, target2);
 904        BUG_ON(target < 0 || target2 < 0);
 905        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 906        BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
 907
 908        /* we need to open-code set_syndrome_sources to handle the
 909         * slot number conversion for 'faila' and 'failb'
 910         */
 911        for (i = 0; i < disks ; i++)
 912                blocks[i] = NULL;
 913        count = 0;
 914        i = d0_idx;
 915        do {
 916                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
 917
 918                blocks[slot] = sh->dev[i].page;
 919
 920                if (i == target)
 921                        faila = slot;
 922                if (i == target2)
 923                        failb = slot;
 924                i = raid6_next_disk(i, disks);
 925        } while (i != d0_idx);
 926
 927        BUG_ON(faila == failb);
 928        if (failb < faila)
 929                swap(faila, failb);
 930        pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
 931                 __func__, (unsigned long long)sh->sector, faila, failb);
 932
 933        atomic_inc(&sh->count);
 934
 935        if (failb == syndrome_disks+1) {
 936                /* Q disk is one of the missing disks */
 937                if (faila == syndrome_disks) {
 938                        /* Missing P+Q, just recompute */
 939                        init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 940                                          ops_complete_compute, sh,
 941                                          to_addr_conv(sh, percpu));
 942                        return async_gen_syndrome(blocks, 0, syndrome_disks+2,
 943                                                  STRIPE_SIZE, &submit);
 944                } else {
 945                        struct page *dest;
 946                        int data_target;
 947                        int qd_idx = sh->qd_idx;
 948
 949                        /* Missing D+Q: recompute D from P, then recompute Q */
 950                        if (target == qd_idx)
 951                                data_target = target2;
 952                        else
 953                                data_target = target;
 954
 955                        count = 0;
 956                        for (i = disks; i-- ; ) {
 957                                if (i == data_target || i == qd_idx)
 958                                        continue;
 959                                blocks[count++] = sh->dev[i].page;
 960                        }
 961                        dest = sh->dev[data_target].page;
 962                        init_async_submit(&submit,
 963                                          ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
 964                                          NULL, NULL, NULL,
 965                                          to_addr_conv(sh, percpu));
 966                        tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
 967                                       &submit);
 968
 969                        count = set_syndrome_sources(blocks, sh);
 970                        init_async_submit(&submit, ASYNC_TX_FENCE, tx,
 971                                          ops_complete_compute, sh,
 972                                          to_addr_conv(sh, percpu));
 973                        return async_gen_syndrome(blocks, 0, count+2,
 974                                                  STRIPE_SIZE, &submit);
 975                }
 976        } else {
 977                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 978                                  ops_complete_compute, sh,
 979                                  to_addr_conv(sh, percpu));
 980                if (failb == syndrome_disks) {
 981                        /* We're missing D+P. */
 982                        return async_raid6_datap_recov(syndrome_disks+2,
 983                                                       STRIPE_SIZE, faila,
 984                                                       blocks, &submit);
 985                } else {
 986                        /* We're missing D+D. */
 987                        return async_raid6_2data_recov(syndrome_disks+2,
 988                                                       STRIPE_SIZE, faila, failb,
 989                                                       blocks, &submit);
 990                }
 991        }
 992}
 993
 994
 995static void ops_complete_prexor(void *stripe_head_ref)
 996{
 997        struct stripe_head *sh = stripe_head_ref;
 998
 999        pr_debug("%s: stripe %llu\n", __func__,
1000                (unsigned long long)sh->sector);
1001}
1002
1003static struct dma_async_tx_descriptor *
1004ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
1005               struct dma_async_tx_descriptor *tx)
1006{
1007        int disks = sh->disks;
1008        struct page **xor_srcs = percpu->scribble;
1009        int count = 0, pd_idx = sh->pd_idx, i;
1010        struct async_submit_ctl submit;
1011
1012        /* existing parity data subtracted */
1013        struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1014
1015        pr_debug("%s: stripe %llu\n", __func__,
1016                (unsigned long long)sh->sector);
1017
1018        for (i = disks; i--; ) {
1019                struct r5dev *dev = &sh->dev[i];
1020                /* Only process blocks that are known to be uptodate */
1021                if (test_bit(R5_Wantdrain, &dev->flags))
1022                        xor_srcs[count++] = dev->page;
1023        }
1024
1025        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1026                          ops_complete_prexor, sh, to_addr_conv(sh, percpu));
1027        tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1028
1029        return tx;
1030}
1031
1032static struct dma_async_tx_descriptor *
1033ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1034{
1035        int disks = sh->disks;
1036        int i;
1037
1038        pr_debug("%s: stripe %llu\n", __func__,
1039                (unsigned long long)sh->sector);
1040
1041        for (i = disks; i--; ) {
1042                struct r5dev *dev = &sh->dev[i];
1043                struct bio *chosen;
1044
1045                if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
1046                        struct bio *wbi;
1047
1048                        spin_lock_irq(&sh->raid_conf->device_lock);
1049                        chosen = dev->towrite;
1050                        dev->towrite = NULL;
1051                        BUG_ON(dev->written);
1052                        wbi = dev->written = chosen;
1053                        spin_unlock_irq(&sh->raid_conf->device_lock);
1054
1055                        while (wbi && wbi->bi_sector <
1056                                dev->sector + STRIPE_SECTORS) {
1057                                if (wbi->bi_rw & REQ_FUA)
1058                                        set_bit(R5_WantFUA, &dev->flags);
1059                                tx = async_copy_data(1, wbi, dev->page,
1060                                        dev->sector, tx);
1061                                wbi = r5_next_bio(wbi, dev->sector);
1062                        }
1063                }
1064        }
1065
1066        return tx;
1067}
1068
1069static void ops_complete_reconstruct(void *stripe_head_ref)
1070{
1071        struct stripe_head *sh = stripe_head_ref;
1072        int disks = sh->disks;
1073        int pd_idx = sh->pd_idx;
1074        int qd_idx = sh->qd_idx;
1075        int i;
1076        bool fua = false;
1077
1078        pr_debug("%s: stripe %llu\n", __func__,
1079                (unsigned long long)sh->sector);
1080
1081        for (i = disks; i--; )
1082                fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1083
1084        for (i = disks; i--; ) {
1085                struct r5dev *dev = &sh->dev[i];
1086
1087                if (dev->written || i == pd_idx || i == qd_idx) {
1088                        set_bit(R5_UPTODATE, &dev->flags);
1089                        if (fua)
1090                                set_bit(R5_WantFUA, &dev->flags);
1091                }
1092        }
1093
1094        if (sh->reconstruct_state == reconstruct_state_drain_run)
1095                sh->reconstruct_state = reconstruct_state_drain_result;
1096        else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1097                sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1098        else {
1099                BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1100                sh->reconstruct_state = reconstruct_state_result;
1101        }
1102
1103        set_bit(STRIPE_HANDLE, &sh->state);
1104        release_stripe(sh);
1105}
1106
1107static void
1108ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1109                     struct dma_async_tx_descriptor *tx)
1110{
1111        int disks = sh->disks;
1112        struct page **xor_srcs = percpu->scribble;
1113        struct async_submit_ctl submit;
1114        int count = 0, pd_idx = sh->pd_idx, i;
1115        struct page *xor_dest;
1116        int prexor = 0;
1117        unsigned long flags;
1118
1119        pr_debug("%s: stripe %llu\n", __func__,
1120                (unsigned long long)sh->sector);
1121
1122        /* check if prexor is active which means only process blocks
1123         * that are part of a read-modify-write (written)
1124         */
1125        if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1126                prexor = 1;
1127                xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1128                for (i = disks; i--; ) {
1129                        struct r5dev *dev = &sh->dev[i];
1130                        if (dev->written)
1131                                xor_srcs[count++] = dev->page;
1132                }
1133        } else {
1134                xor_dest = sh->dev[pd_idx].page;
1135                for (i = disks; i--; ) {
1136                        struct r5dev *dev = &sh->dev[i];
1137                        if (i != pd_idx)
1138                                xor_srcs[count++] = dev->page;
1139                }
1140        }
1141
1142        /* 1/ if we prexor'd then the dest is reused as a source
1143         * 2/ if we did not prexor then we are redoing the parity
1144         * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1145         * for the synchronous xor case
1146         */
1147        flags = ASYNC_TX_ACK |
1148                (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1149
1150        atomic_inc(&sh->count);
1151
1152        init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
1153                          to_addr_conv(sh, percpu));
1154        if (unlikely(count == 1))
1155                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1156        else
1157                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1158}
1159
1160static void
1161ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1162                     struct dma_async_tx_descriptor *tx)
1163{
1164        struct async_submit_ctl submit;
1165        struct page **blocks = percpu->scribble;
1166        int count;
1167
1168        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1169
1170        count = set_syndrome_sources(blocks, sh);
1171
1172        atomic_inc(&sh->count);
1173
1174        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
1175                          sh, to_addr_conv(sh, percpu));
1176        async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
1177}
1178
1179static void ops_complete_check(void *stripe_head_ref)
1180{
1181        struct stripe_head *sh = stripe_head_ref;
1182
1183        pr_debug("%s: stripe %llu\n", __func__,
1184                (unsigned long long)sh->sector);
1185
1186        sh->check_state = check_state_check_result;
1187        set_bit(STRIPE_HANDLE, &sh->state);
1188        release_stripe(sh);
1189}
1190
1191static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1192{
1193        int disks = sh->disks;
1194        int pd_idx = sh->pd_idx;
1195        int qd_idx = sh->qd_idx;
1196        struct page *xor_dest;
1197        struct page **xor_srcs = percpu->scribble;
1198        struct dma_async_tx_descriptor *tx;
1199        struct async_submit_ctl submit;
1200        int count;
1201        int i;
1202
1203        pr_debug("%s: stripe %llu\n", __func__,
1204                (unsigned long long)sh->sector);
1205
1206        count = 0;
1207        xor_dest = sh->dev[pd_idx].page;
1208        xor_srcs[count++] = xor_dest;
1209        for (i = disks; i--; ) {
1210                if (i == pd_idx || i == qd_idx)
1211                        continue;
1212                xor_srcs[count++] = sh->dev[i].page;
1213        }
1214
1215        init_async_submit(&submit, 0, NULL, NULL, NULL,
1216                          to_addr_conv(sh, percpu));
1217        tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1218                           &sh->ops.zero_sum_result, &submit);
1219
1220        atomic_inc(&sh->count);
1221        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1222        tx = async_trigger_callback(&submit);
1223}
1224
1225static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1226{
1227        struct page **srcs = percpu->scribble;
1228        struct async_submit_ctl submit;
1229        int count;
1230
1231        pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1232                (unsigned long long)sh->sector, checkp);
1233
1234        count = set_syndrome_sources(srcs, sh);
1235        if (!checkp)
1236                srcs[count] = NULL;
1237
1238        atomic_inc(&sh->count);
1239        init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1240                          sh, to_addr_conv(sh, percpu));
1241        async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1242                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1243}
1244
1245static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1246{
1247        int overlap_clear = 0, i, disks = sh->disks;
1248        struct dma_async_tx_descriptor *tx = NULL;
1249        raid5_conf_t *conf = sh->raid_conf;
1250        int level = conf->level;
1251        struct raid5_percpu *percpu;
1252        unsigned long cpu;
1253
1254        cpu = get_cpu();
1255        percpu = per_cpu_ptr(conf->percpu, cpu);
1256        if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1257                ops_run_biofill(sh);
1258                overlap_clear++;
1259        }
1260
1261        if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1262                if (level < 6)
1263                        tx = ops_run_compute5(sh, percpu);
1264                else {
1265                        if (sh->ops.target2 < 0 || sh->ops.target < 0)
1266                                tx = ops_run_compute6_1(sh, percpu);
1267                        else
1268                                tx = ops_run_compute6_2(sh, percpu);
1269                }
1270                /* terminate the chain if reconstruct is not set to be run */
1271                if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1272                        async_tx_ack(tx);
1273        }
1274
1275        if (test_bit(STRIPE_OP_PREXOR, &ops_request))
1276                tx = ops_run_prexor(sh, percpu, tx);
1277
1278        if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1279                tx = ops_run_biodrain(sh, tx);
1280                overlap_clear++;
1281        }
1282
1283        if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1284                if (level < 6)
1285                        ops_run_reconstruct5(sh, percpu, tx);
1286                else
1287                        ops_run_reconstruct6(sh, percpu, tx);
1288        }
1289
1290        if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1291                if (sh->check_state == check_state_run)
1292                        ops_run_check_p(sh, percpu);
1293                else if (sh->check_state == check_state_run_q)
1294                        ops_run_check_pq(sh, percpu, 0);
1295                else if (sh->check_state == check_state_run_pq)
1296                        ops_run_check_pq(sh, percpu, 1);
1297                else
1298                        BUG();
1299        }
1300
1301        if (overlap_clear)
1302                for (i = disks; i--; ) {
1303                        struct r5dev *dev = &sh->dev[i];
1304                        if (test_and_clear_bit(R5_Overlap, &dev->flags))
1305                                wake_up(&sh->raid_conf->wait_for_overlap);
1306                }
1307        put_cpu();
1308}
1309
1310#ifdef CONFIG_MULTICORE_RAID456
1311static void async_run_ops(void *param, async_cookie_t cookie)
1312{
1313        struct stripe_head *sh = param;
1314        unsigned long ops_request = sh->ops.request;
1315
1316        clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1317        wake_up(&sh->ops.wait_for_ops);
1318
1319        __raid_run_ops(sh, ops_request);
1320        release_stripe(sh);
1321}
1322
1323static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1324{
1325        /* since handle_stripe can be called outside of raid5d context
1326         * we need to ensure sh->ops.request is de-staged before another
1327         * request arrives
1328         */
1329        wait_event(sh->ops.wait_for_ops,
1330                   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1331        sh->ops.request = ops_request;
1332
1333        atomic_inc(&sh->count);
1334        async_schedule(async_run_ops, sh);
1335}
1336#else
1337#define raid_run_ops __raid_run_ops
1338#endif
1339
1340static int grow_one_stripe(raid5_conf_t *conf)
1341{
1342        struct stripe_head *sh;
1343        sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
1344        if (!sh)
1345                return 0;
1346
1347        sh->raid_conf = conf;
1348        #ifdef CONFIG_MULTICORE_RAID456
1349        init_waitqueue_head(&sh->ops.wait_for_ops);
1350        #endif
1351
1352        if (grow_buffers(sh)) {
1353                shrink_buffers(sh);
1354                kmem_cache_free(conf->slab_cache, sh);
1355                return 0;
1356        }
1357        /* we just created an active stripe so... */
1358        atomic_set(&sh->count, 1);
1359        atomic_inc(&conf->active_stripes);
1360        INIT_LIST_HEAD(&sh->lru);
1361        release_stripe(sh);
1362        return 1;
1363}
1364
1365static int grow_stripes(raid5_conf_t *conf, int num)
1366{
1367        struct kmem_cache *sc;
1368        int devs = max(conf->raid_disks, conf->previous_raid_disks);
1369
1370        if (conf->mddev->gendisk)
1371                sprintf(conf->cache_name[0],
1372                        "raid%d-%s", conf->level, mdname(conf->mddev));
1373        else
1374                sprintf(conf->cache_name[0],
1375                        "raid%d-%p", conf->level, conf->mddev);
1376        sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
1377
1378        conf->active_name = 0;
1379        sc = kmem_cache_create(conf->cache_name[conf->active_name],
1380                               sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
1381                               0, 0, NULL);
1382        if (!sc)
1383                return 1;
1384        conf->slab_cache = sc;
1385        conf->pool_size = devs;
1386        while (num--)
1387                if (!grow_one_stripe(conf))
1388                        return 1;
1389        return 0;
1390}
1391
1392/**
1393 * scribble_len - return the required size of the scribble region
1394 * @num: total number of disks in the array
1395 *
1396 * The size must be enough to contain:
1397 * 1/ a struct page pointer for each device in the array +2
1398 * 2/ room to convert each entry in (1) to its corresponding dma
1399 *    (dma_map_page()) or page (page_address()) address.
1400 *
1401 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
1402 * calculate over all devices (not just the data blocks), using zeros in place
1403 * of the P and Q blocks.
1404 */
1405static size_t scribble_len(int num)
1406{
1407        size_t len;
1408
1409        len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
1410
1411        return len;
1412}
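
/*
 * Illustrative sketch (not part of the driver): scribble_len() sizes a
 * single region that is later carved into two arrays - (num + 2) struct
 * page pointers followed by (num + 2) addr_conv_t slots for the async_tx
 * helpers.  The user-space model below uses an invented opaque addr_conv_t
 * stand-in and an invented struct scribble_view purely to show that split.
 */
#if 0
#include <stdlib.h>

typedef void *addr_conv_t;	/* stand-in for the async_tx type */

struct scribble_view {
	void **pages;		/* num + 2 page pointers (data + P + Q) */
	addr_conv_t *addr_conv;	/* num + 2 address-conversion slots */
};

static int scribble_alloc(struct scribble_view *v, int num)
{
	/* same arithmetic as scribble_len() above */
	size_t len = sizeof(void *) * (num + 2) +
		     sizeof(addr_conv_t) * (num + 2);
	char *buf = malloc(len);

	if (!buf)
		return -1;
	v->pages = (void **)buf;
	v->addr_conv = (addr_conv_t *)(buf + sizeof(void *) * (num + 2));
	return 0;
}
#endif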
1413
1414static int resize_stripes(raid5_conf_t *conf, int newsize)
1415{
1416        /* Make all the stripes able to hold 'newsize' devices.
1417         * New slots in each stripe get 'page' set to a new page.
1418         *
1419         * This happens in stages:
1420         * 1/ create a new kmem_cache and allocate the required number of
1421         *    stripe_heads.
1422         * 2/ gather all the old stripe_heads and transfer the pages across
1423         *    to the new stripe_heads.  This will have the side effect of
1424         *    freezing the array as once all stripe_heads have been collected,
1425         *    no IO will be possible.  Old stripe heads are freed once their
1426         *    pages have been transferred over, and the old kmem_cache is
1427         *    freed when all stripes are done.
1428         * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
1429         *    we simply return a failure status - no need to clean anything up.
1430         * 4/ allocate new pages for the new slots in the new stripe_heads.
1431         *    If this fails, we don't bother trying to shrink the
1432         *    stripe_heads down again, we just leave them as they are.
1433         *    As each stripe_head is processed the new one is released into
1434         *    active service.
1435         *
1436         * Once step2 is started, we cannot afford to wait for a write,
1437         * so we use GFP_NOIO allocations.
1438         */
1439        struct stripe_head *osh, *nsh;
1440        LIST_HEAD(newstripes);
1441        struct disk_info *ndisks;
1442        unsigned long cpu;
1443        int err;
1444        struct kmem_cache *sc;
1445        int i;
1446
1447        if (newsize <= conf->pool_size)
1448                return 0; /* never bother to shrink */
1449
1450        err = md_allow_write(conf->mddev);
1451        if (err)
1452                return err;
1453
1454        /* Step 1 */
1455        sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1456                               sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1457                               0, 0, NULL);
1458        if (!sc)
1459                return -ENOMEM;
1460
1461        for (i = conf->max_nr_stripes; i; i--) {
1462                nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
1463                if (!nsh)
1464                        break;
1465
1466                nsh->raid_conf = conf;
1467                #ifdef CONFIG_MULTICORE_RAID456
1468                init_waitqueue_head(&nsh->ops.wait_for_ops);
1469                #endif
1470
1471                list_add(&nsh->lru, &newstripes);
1472        }
1473        if (i) {
1474                /* didn't get enough, give up */
1475                while (!list_empty(&newstripes)) {
1476                        nsh = list_entry(newstripes.next, struct stripe_head, lru);
1477                        list_del(&nsh->lru);
1478                        kmem_cache_free(sc, nsh);
1479                }
1480                kmem_cache_destroy(sc);
1481                return -ENOMEM;
1482        }
1483        /* Step 2 - Must use GFP_NOIO now.
1484         * OK, we have enough stripes, start collecting inactive
1485         * stripes and copying them over
1486         */
1487        list_for_each_entry(nsh, &newstripes, lru) {
1488                spin_lock_irq(&conf->device_lock);
1489                wait_event_lock_irq(conf->wait_for_stripe,
1490                                    !list_empty(&conf->inactive_list),
1491                                    conf->device_lock,
1492                                    );
1493                osh = get_free_stripe(conf);
1494                spin_unlock_irq(&conf->device_lock);
1495                atomic_set(&nsh->count, 1);
1496                for(i=0; i<conf->pool_size; i++)
1497                        nsh->dev[i].page = osh->dev[i].page;
1498                for( ; i<newsize; i++)
1499                        nsh->dev[i].page = NULL;
1500                kmem_cache_free(conf->slab_cache, osh);
1501        }
1502        kmem_cache_destroy(conf->slab_cache);
1503
1504        /* Step 3.
1505         * At this point, we are holding all the stripes so the array
1506         * is completely stalled, so now is a good time to resize
1507         * conf->disks and the scribble region
1508         */
1509        ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1510        if (ndisks) {
1511                for (i=0; i<conf->raid_disks; i++)
1512                        ndisks[i] = conf->disks[i];
1513                kfree(conf->disks);
1514                conf->disks = ndisks;
1515        } else
1516                err = -ENOMEM;
1517
1518        get_online_cpus();
1519        conf->scribble_len = scribble_len(newsize);
1520        for_each_present_cpu(cpu) {
1521                struct raid5_percpu *percpu;
1522                void *scribble;
1523
1524                percpu = per_cpu_ptr(conf->percpu, cpu);
1525                scribble = kmalloc(conf->scribble_len, GFP_NOIO);
1526
1527                if (scribble) {
1528                        kfree(percpu->scribble);
1529                        percpu->scribble = scribble;
1530                } else {
1531                        err = -ENOMEM;
1532                        break;
1533                }
1534        }
1535        put_online_cpus();
1536
1537        /* Step 4, return new stripes to service */
1538        while(!list_empty(&newstripes)) {
1539                nsh = list_entry(newstripes.next, struct stripe_head, lru);
1540                list_del_init(&nsh->lru);
1541
1542                for (i=conf->raid_disks; i < newsize; i++)
1543                        if (nsh->dev[i].page == NULL) {
1544                                struct page *p = alloc_page(GFP_NOIO);
1545                                nsh->dev[i].page = p;
1546                                if (!p)
1547                                        err = -ENOMEM;
1548                        }
1549                release_stripe(nsh);
1550        }
1551        /* critical section passed, GFP_NOIO no longer needed */
1552
1553        conf->slab_cache = sc;
1554        conf->active_name = 1-conf->active_name;
1555        conf->pool_size = newsize;
1556        return err;
1557}
1558
1559static int drop_one_stripe(raid5_conf_t *conf)
1560{
1561        struct stripe_head *sh;
1562
1563        spin_lock_irq(&conf->device_lock);
1564        sh = get_free_stripe(conf);
1565        spin_unlock_irq(&conf->device_lock);
1566        if (!sh)
1567                return 0;
1568        BUG_ON(atomic_read(&sh->count));
1569        shrink_buffers(sh);
1570        kmem_cache_free(conf->slab_cache, sh);
1571        atomic_dec(&conf->active_stripes);
1572        return 1;
1573}
1574
1575static void shrink_stripes(raid5_conf_t *conf)
1576{
1577        while (drop_one_stripe(conf))
1578                ;
1579
1580        if (conf->slab_cache)
1581                kmem_cache_destroy(conf->slab_cache);
1582        conf->slab_cache = NULL;
1583}
1584
1585static void raid5_end_read_request(struct bio * bi, int error)
1586{
1587        struct stripe_head *sh = bi->bi_private;
1588        raid5_conf_t *conf = sh->raid_conf;
1589        int disks = sh->disks, i;
1590        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1591        char b[BDEVNAME_SIZE];
1592        mdk_rdev_t *rdev;
1593
1594
1595        for (i=0 ; i<disks; i++)
1596                if (bi == &sh->dev[i].req)
1597                        break;
1598
1599        pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1600                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1601                uptodate);
1602        if (i == disks) {
1603                BUG();
1604                return;
1605        }
1606
1607        if (uptodate) {
1608                set_bit(R5_UPTODATE, &sh->dev[i].flags);
1609                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1610                        rdev = conf->disks[i].rdev;
1611                        printk_ratelimited(
1612                                KERN_INFO
1613                                "md/raid:%s: read error corrected"
1614                                " (%lu sectors at %llu on %s)\n",
1615                                mdname(conf->mddev), STRIPE_SECTORS,
1616                                (unsigned long long)(sh->sector
1617                                                     + rdev->data_offset),
1618                                bdevname(rdev->bdev, b));
1619                        atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1620                        clear_bit(R5_ReadError, &sh->dev[i].flags);
1621                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
1622                }
1623                if (atomic_read(&conf->disks[i].rdev->read_errors))
1624                        atomic_set(&conf->disks[i].rdev->read_errors, 0);
1625        } else {
1626                const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
1627                int retry = 0;
1628                rdev = conf->disks[i].rdev;
1629
1630                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1631                atomic_inc(&rdev->read_errors);
1632                if (conf->mddev->degraded >= conf->max_degraded)
1633                        printk_ratelimited(
1634                                KERN_WARNING
1635                                "md/raid:%s: read error not correctable "
1636                                "(sector %llu on %s).\n",
1637                                mdname(conf->mddev),
1638                                (unsigned long long)(sh->sector
1639                                                     + rdev->data_offset),
1640                                bdn);
1641                else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1642                        /* Oh, no!!! */
1643                        printk_ratelimited(
1644                                KERN_WARNING
1645                                "md/raid:%s: read error NOT corrected!! "
1646                                "(sector %llu on %s).\n",
1647                                mdname(conf->mddev),
1648                                (unsigned long long)(sh->sector
1649                                                     + rdev->data_offset),
1650                                bdn);
1651                else if (atomic_read(&rdev->read_errors)
1652                         > conf->max_nr_stripes)
1653                        printk(KERN_WARNING
1654                               "md/raid:%s: Too many read errors, failing device %s.\n",
1655                               mdname(conf->mddev), bdn);
1656                else
1657                        retry = 1;
1658                if (retry)
1659                        set_bit(R5_ReadError, &sh->dev[i].flags);
1660                else {
1661                        clear_bit(R5_ReadError, &sh->dev[i].flags);
1662                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
1663                        md_error(conf->mddev, rdev);
1664                }
1665        }
1666        rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1667        clear_bit(R5_LOCKED, &sh->dev[i].flags);
1668        set_bit(STRIPE_HANDLE, &sh->state);
1669        release_stripe(sh);
1670}
1671
1672static void raid5_end_write_request(struct bio *bi, int error)
1673{
1674        struct stripe_head *sh = bi->bi_private;
1675        raid5_conf_t *conf = sh->raid_conf;
1676        int disks = sh->disks, i;
1677        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1678        sector_t first_bad;
1679        int bad_sectors;
1680
1681        for (i=0 ; i<disks; i++)
1682                if (bi == &sh->dev[i].req)
1683                        break;
1684
1685        pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1686                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1687                uptodate);
1688        if (i == disks) {
1689                BUG();
1690                return;
1691        }
1692
1693        if (!uptodate) {
1694                set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
1695                set_bit(R5_WriteError, &sh->dev[i].flags);
1696        } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
1697                               &first_bad, &bad_sectors))
1698                set_bit(R5_MadeGood, &sh->dev[i].flags);
1699
1700        rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1701        
1702        clear_bit(R5_LOCKED, &sh->dev[i].flags);
1703        set_bit(STRIPE_HANDLE, &sh->state);
1704        release_stripe(sh);
1705}
1706
1707
1708static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1709        
1710static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1711{
1712        struct r5dev *dev = &sh->dev[i];
1713
1714        bio_init(&dev->req);
1715        dev->req.bi_io_vec = &dev->vec;
1716        dev->req.bi_vcnt++;
1717        dev->req.bi_max_vecs++;
1718        dev->vec.bv_page = dev->page;
1719        dev->vec.bv_len = STRIPE_SIZE;
1720        dev->vec.bv_offset = 0;
1721
1722        dev->req.bi_sector = sh->sector;
1723        dev->req.bi_private = sh;
1724
1725        dev->flags = 0;
1726        dev->sector = compute_blocknr(sh, i, previous);
1727}
1728
1729static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1730{
1731        char b[BDEVNAME_SIZE];
1732        raid5_conf_t *conf = mddev->private;
1733        pr_debug("raid456: error called\n");
1734
1735        if (test_and_clear_bit(In_sync, &rdev->flags)) {
1736                unsigned long flags;
1737                spin_lock_irqsave(&conf->device_lock, flags);
1738                mddev->degraded++;
1739                spin_unlock_irqrestore(&conf->device_lock, flags);
1740                /*
1741                 * if recovery was running, make sure it aborts.
1742                 */
1743                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1744        }
1745        set_bit(Blocked, &rdev->flags);
1746        set_bit(Faulty, &rdev->flags);
1747        set_bit(MD_CHANGE_DEVS, &mddev->flags);
1748        printk(KERN_ALERT
1749               "md/raid:%s: Disk failure on %s, disabling device.\n"
1750               "md/raid:%s: Operation continuing on %d devices.\n",
1751               mdname(mddev),
1752               bdevname(rdev->bdev, b),
1753               mdname(mddev),
1754               conf->raid_disks - mddev->degraded);
1755}
1756
1757/*
1758 * Input: a 'big' sector number,
1759 * Output: index of the data and parity disk, and the sector # in them.
1760 */
1761static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1762                                     int previous, int *dd_idx,
1763                                     struct stripe_head *sh)
1764{
1765        sector_t stripe, stripe2;
1766        sector_t chunk_number;
1767        unsigned int chunk_offset;
1768        int pd_idx, qd_idx;
1769        int ddf_layout = 0;
1770        sector_t new_sector;
1771        int algorithm = previous ? conf->prev_algo
1772                                 : conf->algorithm;
1773        int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1774                                         : conf->chunk_sectors;
1775        int raid_disks = previous ? conf->previous_raid_disks
1776                                  : conf->raid_disks;
1777        int data_disks = raid_disks - conf->max_degraded;
1778
1779        /* First compute the information on this sector */
1780
1781        /*
1782         * Compute the chunk number and the sector offset inside the chunk
1783         */
1784        chunk_offset = sector_div(r_sector, sectors_per_chunk);
1785        chunk_number = r_sector;
1786
1787        /*
1788         * Compute the stripe number
1789         */
1790        stripe = chunk_number;
1791        *dd_idx = sector_div(stripe, data_disks);
1792        stripe2 = stripe;
1793        /*
1794         * Select the parity disk based on the user selected algorithm.
1795         */
1796        pd_idx = qd_idx = -1;
1797        switch(conf->level) {
1798        case 4:
1799                pd_idx = data_disks;
1800                break;
1801        case 5:
1802                switch (algorithm) {
1803                case ALGORITHM_LEFT_ASYMMETRIC:
1804                        pd_idx = data_disks - sector_div(stripe2, raid_disks);
1805                        if (*dd_idx >= pd_idx)
1806                                (*dd_idx)++;
1807                        break;
1808                case ALGORITHM_RIGHT_ASYMMETRIC:
1809                        pd_idx = sector_div(stripe2, raid_disks);
1810                        if (*dd_idx >= pd_idx)
1811                                (*dd_idx)++;
1812                        break;
1813                case ALGORITHM_LEFT_SYMMETRIC:
1814                        pd_idx = data_disks - sector_div(stripe2, raid_disks);
1815                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1816                        break;
1817                case ALGORITHM_RIGHT_SYMMETRIC:
1818                        pd_idx = sector_div(stripe2, raid_disks);
1819                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1820                        break;
1821                case ALGORITHM_PARITY_0:
1822                        pd_idx = 0;
1823                        (*dd_idx)++;
1824                        break;
1825                case ALGORITHM_PARITY_N:
1826                        pd_idx = data_disks;
1827                        break;
1828                default:
1829                        BUG();
1830                }
1831                break;
1832        case 6:
1833
1834                switch (algorithm) {
1835                case ALGORITHM_LEFT_ASYMMETRIC:
1836                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1837                        qd_idx = pd_idx + 1;
1838                        if (pd_idx == raid_disks-1) {
1839                                (*dd_idx)++;    /* Q D D D P */
1840                                qd_idx = 0;
1841                        } else if (*dd_idx >= pd_idx)
1842                                (*dd_idx) += 2; /* D D P Q D */
1843                        break;
1844                case ALGORITHM_RIGHT_ASYMMETRIC:
1845                        pd_idx = sector_div(stripe2, raid_disks);
1846                        qd_idx = pd_idx + 1;
1847                        if (pd_idx == raid_disks-1) {
1848                                (*dd_idx)++;    /* Q D D D P */
1849                                qd_idx = 0;
1850                        } else if (*dd_idx >= pd_idx)
1851                                (*dd_idx) += 2; /* D D P Q D */
1852                        break;
1853                case ALGORITHM_LEFT_SYMMETRIC:
1854                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1855                        qd_idx = (pd_idx + 1) % raid_disks;
1856                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1857                        break;
1858                case ALGORITHM_RIGHT_SYMMETRIC:
1859                        pd_idx = sector_div(stripe2, raid_disks);
1860                        qd_idx = (pd_idx + 1) % raid_disks;
1861                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1862                        break;
1863
1864                case ALGORITHM_PARITY_0:
1865                        pd_idx = 0;
1866                        qd_idx = 1;
1867                        (*dd_idx) += 2;
1868                        break;
1869                case ALGORITHM_PARITY_N:
1870                        pd_idx = data_disks;
1871                        qd_idx = data_disks + 1;
1872                        break;
1873
1874                case ALGORITHM_ROTATING_ZERO_RESTART:
1875                        /* Exactly the same as RIGHT_ASYMMETRIC, but the
1876                         * ordering of blocks for computing Q is different.
1877                         */
1878                        pd_idx = sector_div(stripe2, raid_disks);
1879                        qd_idx = pd_idx + 1;
1880                        if (pd_idx == raid_disks-1) {
1881                                (*dd_idx)++;    /* Q D D D P */
1882                                qd_idx = 0;
1883                        } else if (*dd_idx >= pd_idx)
1884                                (*dd_idx) += 2; /* D D P Q D */
1885                        ddf_layout = 1;
1886                        break;
1887
1888                case ALGORITHM_ROTATING_N_RESTART:
1889                        /* Same as left_asymmetric, but the first stripe is
1890                         * D D D P Q  rather than
1891                         * Q D D D P
1892                         */
1893                        stripe2 += 1;
1894                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1895                        qd_idx = pd_idx + 1;
1896                        if (pd_idx == raid_disks-1) {
1897                                (*dd_idx)++;    /* Q D D D P */
1898                                qd_idx = 0;
1899                        } else if (*dd_idx >= pd_idx)
1900                                (*dd_idx) += 2; /* D D P Q D */
1901                        ddf_layout = 1;
1902                        break;
1903
1904                case ALGORITHM_ROTATING_N_CONTINUE:
1905                        /* Same as left_symmetric but Q is before P */
1906                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1907                        qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1908                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1909                        ddf_layout = 1;
1910                        break;
1911
1912                case ALGORITHM_LEFT_ASYMMETRIC_6:
1913                        /* RAID5 left_asymmetric, with Q on last device */
1914                        pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1915                        if (*dd_idx >= pd_idx)
1916                                (*dd_idx)++;
1917                        qd_idx = raid_disks - 1;
1918                        break;
1919
1920                case ALGORITHM_RIGHT_ASYMMETRIC_6:
1921                        pd_idx = sector_div(stripe2, raid_disks-1);
1922                        if (*dd_idx >= pd_idx)
1923                                (*dd_idx)++;
1924                        qd_idx = raid_disks - 1;
1925                        break;
1926
1927                case ALGORITHM_LEFT_SYMMETRIC_6:
1928                        pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1929                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1930                        qd_idx = raid_disks - 1;
1931                        break;
1932
1933                case ALGORITHM_RIGHT_SYMMETRIC_6:
1934                        pd_idx = sector_div(stripe2, raid_disks-1);
1935                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1936                        qd_idx = raid_disks - 1;
1937                        break;
1938
1939                case ALGORITHM_PARITY_0_6:
1940                        pd_idx = 0;
1941                        (*dd_idx)++;
1942                        qd_idx = raid_disks - 1;
1943                        break;
1944
1945                default:
1946                        BUG();
1947                }
1948                break;
1949        }
1950
1951        if (sh) {
1952                sh->pd_idx = pd_idx;
1953                sh->qd_idx = qd_idx;
1954                sh->ddf_layout = ddf_layout;
1955        }
1956        /*
1957         * Finally, compute the new sector number
1958         */
1959        new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
1960        return new_sector;
1961}
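
/*
 * Illustrative sketch (not part of the driver): a user-space walk-through
 * of raid5_compute_sector() restricted to the RAID5 left-symmetric case
 * above.  The geometry (4 disks, 64-sector chunks) and sector 1000 are
 * made up for the example; with those numbers the block lands on data
 * disk 3, parity sits on disk 2, and the device sector is 360.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long r_sector = 1000;	/* array-relative sector */
	int raid_disks = 4, sectors_per_chunk = 64;
	int data_disks = raid_disks - 1;	/* RAID5: one parity block */

	unsigned long long chunk_offset = r_sector % sectors_per_chunk;
	unsigned long long chunk_number = r_sector / sectors_per_chunk;
	unsigned long long stripe = chunk_number / data_disks;
	int dd_idx = chunk_number % data_disks;

	/* ALGORITHM_LEFT_SYMMETRIC: parity rotates, data indices continue
	 * past the parity disk */
	int pd_idx = data_disks - (int)(stripe % raid_disks);

	dd_idx = (pd_idx + 1 + dd_idx) % raid_disks;

	unsigned long long new_sector =
		stripe * sectors_per_chunk + chunk_offset;

	printf("data disk %d, parity disk %d, device sector %llu\n",
	       dd_idx, pd_idx, new_sector);	/* prints 3, 2, 360 */
	return 0;
}
#endif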
1962
1963
1964static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1965{
1966        raid5_conf_t *conf = sh->raid_conf;
1967        int raid_disks = sh->disks;
1968        int data_disks = raid_disks - conf->max_degraded;
1969        sector_t new_sector = sh->sector, check;
1970        int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1971                                         : conf->chunk_sectors;
1972        int algorithm = previous ? conf->prev_algo
1973                                 : conf->algorithm;
1974        sector_t stripe;
1975        int chunk_offset;
1976        sector_t chunk_number;
1977        int dummy1, dd_idx = i;
1978        sector_t r_sector;
1979        struct stripe_head sh2;
1980
1981
1982        chunk_offset = sector_div(new_sector, sectors_per_chunk);
1983        stripe = new_sector;
1984
1985        if (i == sh->pd_idx)
1986                return 0;
1987        switch(conf->level) {
1988        case 4: break;
1989        case 5:
1990                switch (algorithm) {
1991                case ALGORITHM_LEFT_ASYMMETRIC:
1992                case ALGORITHM_RIGHT_ASYMMETRIC:
1993                        if (i > sh->pd_idx)
1994                                i--;
1995                        break;
1996                case ALGORITHM_LEFT_SYMMETRIC:
1997                case ALGORITHM_RIGHT_SYMMETRIC:
1998                        if (i < sh->pd_idx)
1999                                i += raid_disks;
2000                        i -= (sh->pd_idx + 1);
2001                        break;
2002                case ALGORITHM_PARITY_0:
2003                        i -= 1;
2004                        break;
2005                case ALGORITHM_PARITY_N:
2006                        break;
2007                default:
2008                        BUG();
2009                }
2010                break;
2011        case 6:
2012                if (i == sh->qd_idx)
2013                        return 0; /* It is the Q disk */
2014                switch (algorithm) {
2015                case ALGORITHM_LEFT_ASYMMETRIC:
2016                case ALGORITHM_RIGHT_ASYMMETRIC:
2017                case ALGORITHM_ROTATING_ZERO_RESTART:
2018                case ALGORITHM_ROTATING_N_RESTART:
2019                        if (sh->pd_idx == raid_disks-1)
2020                                i--;    /* Q D D D P */
2021                        else if (i > sh->pd_idx)
2022                                i -= 2; /* D D P Q D */
2023                        break;
2024                case ALGORITHM_LEFT_SYMMETRIC:
2025                case ALGORITHM_RIGHT_SYMMETRIC:
2026                        if (sh->pd_idx == raid_disks-1)
2027                                i--; /* Q D D D P */
2028                        else {
2029                                /* D D P Q D */
2030                                if (i < sh->pd_idx)
2031                                        i += raid_disks;
2032                                i -= (sh->pd_idx + 2);
2033                        }
2034                        break;
2035                case ALGORITHM_PARITY_0:
2036                        i -= 2;
2037                        break;
2038                case ALGORITHM_PARITY_N:
2039                        break;
2040                case ALGORITHM_ROTATING_N_CONTINUE:
2041                        /* Like left_symmetric, but P is before Q */
2042                        if (sh->pd_idx == 0)
2043                                i--;    /* P D D D Q */
2044                        else {
2045                                /* D D Q P D */
2046                                if (i < sh->pd_idx)
2047                                        i += raid_disks;
2048                                i -= (sh->pd_idx + 1);
2049                        }
2050                        break;
2051                case ALGORITHM_LEFT_ASYMMETRIC_6:
2052                case ALGORITHM_RIGHT_ASYMMETRIC_6:
2053                        if (i > sh->pd_idx)
2054                                i--;
2055                        break;
2056                case ALGORITHM_LEFT_SYMMETRIC_6:
2057                case ALGORITHM_RIGHT_SYMMETRIC_6:
2058                        if (i < sh->pd_idx)
2059                                i += data_disks + 1;
2060                        i -= (sh->pd_idx + 1);
2061                        break;
2062                case ALGORITHM_PARITY_0_6:
2063                        i -= 1;
2064                        break;
2065                default:
2066                        BUG();
2067                }
2068                break;
2069        }
2070
2071        chunk_number = stripe * data_disks + i;
2072        r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2073
2074        check = raid5_compute_sector(conf, r_sector,
2075                                     previous, &dummy1, &sh2);
2076        if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2077                || sh2.qd_idx != sh->qd_idx) {
2078                printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2079                       mdname(conf->mddev));
2080                return 0;
2081        }
2082        return r_sector;
2083}
2084
2085
2086static void
2087schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2088                         int rcw, int expand)
2089{
2090        int i, pd_idx = sh->pd_idx, disks = sh->disks;
2091        raid5_conf_t *conf = sh->raid_conf;
2092        int level = conf->level;
2093
2094        if (rcw) {
2095                /* if we are not expanding, this is a proper write request, and
2096                 * there will be bios with new data to be drained into the
2097                 * stripe cache
2098                 */
2099                if (!expand) {
2100                        sh->reconstruct_state = reconstruct_state_drain_run;
2101                        set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2102                } else
2103                        sh->reconstruct_state = reconstruct_state_run;
2104
2105                set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2106
2107                for (i = disks; i--; ) {
2108                        struct r5dev *dev = &sh->dev[i];
2109
2110                        if (dev->towrite) {
2111                                set_bit(R5_LOCKED, &dev->flags);
2112                                set_bit(R5_Wantdrain, &dev->flags);
2113                                if (!expand)
2114                                        clear_bit(R5_UPTODATE, &dev->flags);
2115                                s->locked++;
2116                        }
2117                }
2118                if (s->locked + conf->max_degraded == disks)
2119                        if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2120                                atomic_inc(&conf->pending_full_writes);
2121        } else {
2122                BUG_ON(level == 6);
2123                BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2124                        test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2125
2126                sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2127                set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2128                set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2129                set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2130
2131                for (i = disks; i--; ) {
2132                        struct r5dev *dev = &sh->dev[i];
2133                        if (i == pd_idx)
2134                                continue;
2135
2136                        if (dev->towrite &&
2137                            (test_bit(R5_UPTODATE, &dev->flags) ||
2138                             test_bit(R5_Wantcompute, &dev->flags))) {
2139                                set_bit(R5_Wantdrain, &dev->flags);
2140                                set_bit(R5_LOCKED, &dev->flags);
2141                                clear_bit(R5_UPTODATE, &dev->flags);
2142                                s->locked++;
2143                        }
2144                }
2145        }
2146
2147        /* keep the parity disk(s) locked while asynchronous operations
2148         * are in flight
2149         */
2150        set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2151        clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2152        s->locked++;
2153
2154        if (level == 6) {
2155                int qd_idx = sh->qd_idx;
2156                struct r5dev *dev = &sh->dev[qd_idx];
2157
2158                set_bit(R5_LOCKED, &dev->flags);
2159                clear_bit(R5_UPTODATE, &dev->flags);
2160                s->locked++;
2161        }
2162
2163        pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2164                __func__, (unsigned long long)sh->sector,
2165                s->locked, s->ops_request);
2166}
2167
2168/*
2169 * Each stripe/dev can have one or more bios attached.
2170 * toread/towrite point to the first in a chain.
2171 * The bi_next chain must be in order.
2172 */
2173static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2174{
2175        struct bio **bip;
2176        raid5_conf_t *conf = sh->raid_conf;
2177        int firstwrite=0;
2178
2179        pr_debug("adding bi b#%llu to stripe s#%llu\n",
2180                (unsigned long long)bi->bi_sector,
2181                (unsigned long long)sh->sector);
2182
2183
2184        spin_lock_irq(&conf->device_lock);
2185        if (forwrite) {
2186                bip = &sh->dev[dd_idx].towrite;
2187                if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2188                        firstwrite = 1;
2189        } else
2190                bip = &sh->dev[dd_idx].toread;
2191        while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2192                if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2193                        goto overlap;
2194                bip = & (*bip)->bi_next;
2195        }
2196        if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2197                goto overlap;
2198
2199        BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2200        if (*bip)
2201                bi->bi_next = *bip;
2202        *bip = bi;
2203        bi->bi_phys_segments++;
2204
2205        if (forwrite) {
2206                /* check if page is covered */
2207                sector_t sector = sh->dev[dd_idx].sector;
2208                for (bi=sh->dev[dd_idx].towrite;
2209                     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2210                             bi && bi->bi_sector <= sector;
2211                     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2212                        if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2213                                sector = bi->bi_sector + (bi->bi_size>>9);
2214                }
2215                if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2216                        set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2217        }
2218        spin_unlock_irq(&conf->device_lock);
2219
2220        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2221                (unsigned long long)(*bip)->bi_sector,
2222                (unsigned long long)sh->sector, dd_idx);
2223
2224        if (conf->mddev->bitmap && firstwrite) {
2225                bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2226                                  STRIPE_SECTORS, 0);
2227                sh->bm_seq = conf->seq_flush+1;
2228                set_bit(STRIPE_BIT_DELAY, &sh->state);
2229        }
2230        return 1;
2231
2232 overlap:
2233        set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2234        spin_unlock_irq(&conf->device_lock);
2235        return 0;
2236}
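
/*
 * Illustrative sketch (not part of the driver): add_stripe_bio() above
 * inserts into the per-device toread/towrite chain by walking a
 * pointer-to-pointer ('bip') through a list kept sorted by start sector,
 * refusing any overlap.  The user-space model below uses an invented
 * struct range in place of a bio to show the same pattern.
 */
#if 0
#include <stddef.h>

struct range {
	unsigned long long start;	/* like bio->bi_sector */
	unsigned long long len;		/* like bio->bi_size >> 9 */
	struct range *next;		/* like bio->bi_next */
};

/* returns 1 on success, 0 if the new range overlaps an existing one */
static int insert_sorted(struct range **head, struct range *r)
{
	struct range **rp = head;

	while (*rp && (*rp)->start < r->start) {
		if ((*rp)->start + (*rp)->len > r->start)
			return 0;		/* overlaps a predecessor */
		rp = &(*rp)->next;
	}
	if (*rp && (*rp)->start < r->start + r->len)
		return 0;			/* overlaps the successor */

	r->next = *rp;
	*rp = r;
	return 1;
}
#endif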
2237
2238static void end_reshape(raid5_conf_t *conf);
2239
2240static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
2241                            struct stripe_head *sh)
2242{
2243        int sectors_per_chunk =
2244                previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2245        int dd_idx;
2246        int chunk_offset = sector_div(stripe, sectors_per_chunk);
2247        int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2248
2249        raid5_compute_sector(conf,
2250                             stripe * (disks - conf->max_degraded)
2251                             *sectors_per_chunk + chunk_offset,
2252                             previous,
2253                             &dd_idx, sh);
2254}
2255
2256static void
2257handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
2258                                struct stripe_head_state *s, int disks,
2259                                struct bio **return_bi)
2260{
2261        int i;
2262        for (i = disks; i--; ) {
2263                struct bio *bi;
2264                int bitmap_end = 0;
2265
2266                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2267                        mdk_rdev_t *rdev;
2268                        rcu_read_lock();
2269                        rdev = rcu_dereference(conf->disks[i].rdev);
2270                        if (rdev && test_bit(In_sync, &rdev->flags))
2271                                atomic_inc(&rdev->nr_pending);
2272                        else
2273                                rdev = NULL;
2274                        rcu_read_unlock();
2275                        if (rdev) {
2276                                if (!rdev_set_badblocks(
2277                                            rdev,
2278                                            sh->sector,
2279                                            STRIPE_SECTORS, 0))
2280                                        md_error(conf->mddev, rdev);
2281                                rdev_dec_pending(rdev, conf->mddev);
2282                        }
2283                }
2284                spin_lock_irq(&conf->device_lock);
2285                /* fail all writes first */
2286                bi = sh->dev[i].towrite;
2287                sh->dev[i].towrite = NULL;
2288                if (bi) {
2289                        s->to_write--;
2290                        bitmap_end = 1;
2291                }
2292
2293                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2294                        wake_up(&conf->wait_for_overlap);
2295
2296                while (bi && bi->bi_sector <
2297                        sh->dev[i].sector + STRIPE_SECTORS) {
2298                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2299                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
2300                        if (!raid5_dec_bi_phys_segments(bi)) {
2301                                md_write_end(conf->mddev);
2302                                bi->bi_next = *return_bi;
2303                                *return_bi = bi;
2304                        }
2305                        bi = nextbi;
2306                }
2307                /* and fail all 'written' */
2308                bi = sh->dev[i].written;
2309                sh->dev[i].written = NULL;
2310                if (bi) bitmap_end = 1;
2311                while (bi && bi->bi_sector <
2312                       sh->dev[i].sector + STRIPE_SECTORS) {
2313                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2314                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
2315                        if (!raid5_dec_bi_phys_segments(bi)) {
2316                                md_write_end(conf->mddev);
2317                                bi->bi_next = *return_bi;
2318                                *return_bi = bi;
2319                        }
2320                        bi = bi2;
2321                }
2322
2323                /* fail any reads if this device is non-operational and
2324                 * the data has not reached the cache yet.
2325                 */
2326                if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2327                    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2328                      test_bit(R5_ReadError, &sh->dev[i].flags))) {
2329                        bi = sh->dev[i].toread;
2330                        sh->dev[i].toread = NULL;
2331                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2332                                wake_up(&conf->wait_for_overlap);
2333                        if (bi) s->to_read--;
2334                        while (bi && bi->bi_sector <
2335                               sh->dev[i].sector + STRIPE_SECTORS) {
2336                                struct bio *nextbi =
2337                                        r5_next_bio(bi, sh->dev[i].sector);
2338                                clear_bit(BIO_UPTODATE, &bi->bi_flags);
2339                                if (!raid5_dec_bi_phys_segments(bi)) {
2340                                        bi->bi_next = *return_bi;
2341                                        *return_bi = bi;
2342                                }
2343                                bi = nextbi;
2344                        }
2345                }
2346                spin_unlock_irq(&conf->device_lock);
2347                if (bitmap_end)
2348                        bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2349                                        STRIPE_SECTORS, 0, 0);
2350                /* If we were in the middle of a write the parity block might
2351                 * still be locked - so just clear all R5_LOCKED flags
2352                 */
2353                clear_bit(R5_LOCKED, &sh->dev[i].flags);
2354        }
2355
2356        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2357                if (atomic_dec_and_test(&conf->pending_full_writes))
2358                        md_wakeup_thread(conf->mddev->thread);
2359}
2360
2361static void
2362handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh,
2363                   struct stripe_head_state *s)
2364{
2365        int abort = 0;
2366        int i;
2367
2368        md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
2369        clear_bit(STRIPE_SYNCING, &sh->state);
2370        s->syncing = 0;
2371        /* There is nothing more to do for sync/check/repair.
2372         * For recover we need to record a bad block on all
2373         * non-sync devices, or abort the recovery
2374         */
2375        if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
2376                return;
2377        /* During recovery devices cannot be removed, so locking and
2378         * refcounting of rdevs is not needed
2379         */
2380        for (i = 0; i < conf->raid_disks; i++) {
2381                mdk_rdev_t *rdev = conf->disks[i].rdev;
2382                if (!rdev
2383                    || test_bit(Faulty, &rdev->flags)
2384                    || test_bit(In_sync, &rdev->flags))
2385                        continue;
2386                if (!rdev_set_badblocks(rdev, sh->sector,
2387                                        STRIPE_SECTORS, 0))
2388                        abort = 1;
2389        }
2390        if (abort) {
2391                conf->recovery_disabled = conf->mddev->recovery_disabled;
2392                set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
2393        }
2394}
2395
2396/* fetch_block - checks the given member device to see if its data needs
2397 * to be read or computed to satisfy a request.
2398 *
2399 * Returns 1 when no more member devices need to be checked, otherwise returns
2400 * 0 to tell the loop in handle_stripe_fill to continue
2401 */
2402static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2403                       int disk_idx, int disks)
2404{
2405        struct r5dev *dev = &sh->dev[disk_idx];
2406        struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
2407                                  &sh->dev[s->failed_num[1]] };
2408
2409        /* is the data in this block needed, and can we get it? */
2410        if (!test_bit(R5_LOCKED, &dev->flags) &&
2411            !test_bit(R5_UPTODATE, &dev->flags) &&
2412            (dev->toread ||
2413             (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2414             s->syncing || s->expanding ||
2415             (s->failed >= 1 && fdev[0]->toread) ||
2416             (s->failed >= 2 && fdev[1]->toread) ||
2417             (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2418              !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2419             (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
2420                /* we would like to get this block, possibly by computing it,
2421                 * otherwise read it if the backing disk is insync
2422                 */
2423                BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2424                BUG_ON(test_bit(R5_Wantread, &dev->flags));
2425                if ((s->uptodate == disks - 1) &&
2426                    (s->failed && (disk_idx == s->failed_num[0] ||
2427                                   disk_idx == s->failed_num[1]))) {
2428                        /* this block is on a failed disk and is wanted,
2429                         * so compute it
2430                         */
2431                        pr_debug("Computing stripe %llu block %d\n",
2432                               (unsigned long long)sh->sector, disk_idx);
2433                        set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2434                        set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2435                        set_bit(R5_Wantcompute, &dev->flags);
2436                        sh->ops.target = disk_idx;
2437                        sh->ops.target2 = -1; /* no 2nd target */
2438                        s->req_compute = 1;
2439                        /* Careful: from this point on 'uptodate' is in the eye
2440                         * of raid_run_ops which services 'compute' operations
2441                         * before writes. R5_Wantcompute flags a block that will
2442                         * be R5_UPTODATE by the time it is needed for a
2443                         * subsequent operation.
2444                         */
2445                        s->uptodate++;
2446                        return 1;
2447                } else if (s->uptodate == disks-2 && s->failed >= 2) {
2448                        /* Computing 2-failure is *very* expensive; only
2449                         * do it if failed >= 2
2450                         */
2451                        int other;
2452                        for (other = disks; other--; ) {
2453                                if (other == disk_idx)
2454                                        continue;
2455                                if (!test_bit(R5_UPTODATE,
2456                                      &sh->dev[other].flags))
2457                                        break;
2458                        }
2459                        BUG_ON(other < 0);
2460                        pr_debug("Computing stripe %llu blocks %d,%d\n",
2461                               (unsigned long long)sh->sector,
2462                               disk_idx, other);
2463                        set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2464                        set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2465                        set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2466                        set_bit(R5_Wantcompute, &sh->dev[other].flags);
2467                        sh->ops.target = disk_idx;
2468                        sh->ops.target2 = other;
2469                        s->uptodate += 2;
2470                        s->req_compute = 1;
2471                        return 1;
2472                } else if (test_bit(R5_Insync, &dev->flags)) {
2473                        set_bit(R5_LOCKED, &dev->flags);
2474                        set_bit(R5_Wantread, &dev->flags);
2475                        s->locked++;
2476                        pr_debug("Reading block %d (sync=%d)\n",
2477                                disk_idx, s->syncing);
2478                }
2479        }
2480
2481        return 0;
2482}
2483
2484/**
2485 * handle_stripe_fill - read or compute data to satisfy pending requests.
2486 */
2487static void handle_stripe_fill(struct stripe_head *sh,
2488                               struct stripe_head_state *s,
2489                               int disks)
2490{
2491        int i;
2492
2493        /* look for blocks to read/compute, skip this if a compute
2494         * is already in flight, or if the stripe contents are in the
2495         * midst of changing due to a write
2496         */
2497        if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2498            !sh->reconstruct_state)
2499                for (i = disks; i--; )
2500                        if (fetch_block(sh, s, i, disks))
2501                                break;
2502        set_bit(STRIPE_HANDLE, &sh->state);
2503}
2504
2505
2506/* handle_stripe_clean_event
2507 * any written block on an uptodate or failed drive can be returned.
2508 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2509 * never LOCKED, so we don't need to test 'failed' directly.
2510 */
2511static void handle_stripe_clean_event(raid5_conf_t *conf,
2512        struct stripe_head *sh, int disks, struct bio **return_bi)
2513{
2514        int i;
2515        struct r5dev *dev;
2516
2517        for (i = disks; i--; )
2518                if (sh->dev[i].written) {
2519                        dev = &sh->dev[i];
2520                        if (!test_bit(R5_LOCKED, &dev->flags) &&
2521                                test_bit(R5_UPTODATE, &dev->flags)) {
2522                                /* We can return any write requests */
2523                                struct bio *wbi, *wbi2;
2524                                int bitmap_end = 0;
2525                                pr_debug("Return write for disc %d\n", i);
2526                                spin_lock_irq(&conf->device_lock);
2527                                wbi = dev->written;
2528                                dev->written = NULL;
2529                                while (wbi && wbi->bi_sector <
2530                                        dev->sector + STRIPE_SECTORS) {
2531                                        wbi2 = r5_next_bio(wbi, dev->sector);
2532                                        if (!raid5_dec_bi_phys_segments(wbi)) {
2533                                                md_write_end(conf->mddev);
2534                                                wbi->bi_next = *return_bi;
2535                                                *return_bi = wbi;
2536                                        }
2537                                        wbi = wbi2;
2538                                }
2539                                if (dev->towrite == NULL)
2540                                        bitmap_end = 1;
2541                                spin_unlock_irq(&conf->device_lock);
2542                                if (bitmap_end)
2543                                        bitmap_endwrite(conf->mddev->bitmap,
2544                                                        sh->sector,
2545                                                        STRIPE_SECTORS,
2546                                         !test_bit(STRIPE_DEGRADED, &sh->state),
2547                                                        0);
2548                        }
2549                }
2550
2551        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2552                if (atomic_dec_and_test(&conf->pending_full_writes))
2553                        md_wakeup_thread(conf->mddev->thread);
2554}
2555
2556static void handle_stripe_dirtying(raid5_conf_t *conf,
2557                                   struct stripe_head *sh,
2558                                   struct stripe_head_state *s,
2559                                   int disks)
2560{
2561        int rmw = 0, rcw = 0, i;
2562        if (conf->max_degraded == 2) {
2563                /* RAID6 requires 'rcw' in the current implementation.
2564                 * Calculate the real rcw later - for now make it
2565                 * look like rcw is cheaper
2566                 */
2567                rcw = 1; rmw = 2;
2568        } else for (i = disks; i--; ) {
2569                /* would I have to read this buffer for read_modify_write */
2570                struct r5dev *dev = &sh->dev[i];
2571                if ((dev->towrite || i == sh->pd_idx) &&
2572                    !test_bit(R5_LOCKED, &dev->flags) &&
2573                    !(test_bit(R5_UPTODATE, &dev->flags) ||
2574                      test_bit(R5_Wantcompute, &dev->flags))) {
2575                        if (test_bit(R5_Insync, &dev->flags))
2576                                rmw++;
2577                        else
2578                                rmw += 2*disks;  /* cannot read it */
2579                }
2580                /* Would I have to read this buffer for reconstruct_write */
2581                if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2582                    !test_bit(R5_LOCKED, &dev->flags) &&
2583                    !(test_bit(R5_UPTODATE, &dev->flags) ||
2584                    test_bit(R5_Wantcompute, &dev->flags))) {
2585                        if (test_bit(R5_Insync, &dev->flags)) rcw++;
2586                        else
2587                                rcw += 2*disks;
2588                }
2589        }
2590        pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2591                (unsigned long long)sh->sector, rmw, rcw);
2592        set_bit(STRIPE_HANDLE, &sh->state);
2593        if (rmw < rcw && rmw > 0)
2594                /* prefer read-modify-write, but need to get some data */
2595                for (i = disks; i--; ) {
2596                        struct r5dev *dev = &sh->dev[i];
2597                        if ((dev->towrite || i == sh->pd_idx) &&
2598                            !test_bit(R5_LOCKED, &dev->flags) &&
2599                            !(test_bit(R5_UPTODATE, &dev->flags) ||
2600                            test_bit(R5_Wantcompute, &dev->flags)) &&
2601                            test_bit(R5_Insync, &dev->flags)) {
2602                                if (
2603                                  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2604                                        pr_debug("Read_old block "
2605                                                "%d for r-m-w\n", i);
2606                                        set_bit(R5_LOCKED, &dev->flags);
2607                                        set_bit(R5_Wantread, &dev->flags);
2608                                        s->locked++;
2609                                } else {
2610                                        set_bit(STRIPE_DELAYED, &sh->state);
2611                                        set_bit(STRIPE_HANDLE, &sh->state);
2612                                }
2613                        }
2614                }
2615        if (rcw <= rmw && rcw > 0) {
2616                /* want reconstruct write, but need to get some data */
2617                rcw = 0;
2618                for (i = disks; i--; ) {
2619                        struct r5dev *dev = &sh->dev[i];
2620                        if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2621                            i != sh->pd_idx && i != sh->qd_idx &&
2622                            !test_bit(R5_LOCKED, &dev->flags) &&
2623                            !(test_bit(R5_UPTODATE, &dev->flags) ||
2624                              test_bit(R5_Wantcompute, &dev->flags))) {
2625                                rcw++;
2626                                if (!test_bit(R5_Insync, &dev->flags))
2627                                        continue; /* it's a failed drive */
2628                                if (
2629                                  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2630                                        pr_debug("Read_old block "
2631                                                "%d for Reconstruct\n", i);
2632                                        set_bit(R5_LOCKED, &dev->flags);
2633                                        set_bit(R5_Wantread, &dev->flags);
2634                                        s->locked++;
2635                                } else {
2636                                        set_bit(STRIPE_DELAYED, &sh->state);
2637                                        set_bit(STRIPE_HANDLE, &sh->state);
2638                                }
2639                        }
2640                }
2641        }
2642        /* now if nothing is locked, and if we have enough data,
2643         * we can start a write request
2644         */
2645        /* since handle_stripe can be called at any time we need to handle the
2646         * case where a compute block operation has been submitted and then a
2647         * subsequent call wants to start a write request.  raid_run_ops only
2648         * handles the case where compute block and reconstruct are requested
2649         * simultaneously.  If this is not the case then new writes need to be
2650         * held off until the compute completes.
2651         */
2652        if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2653            (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2654            !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2655                schedule_reconstruction(sh, s, rcw == 0, 0);
2656}
2657
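/*
 * handle_parity_checks5 - drive the RAID5 parity-check state machine.
 * Roughly: from check_state_idle a new check (STRIPE_OP_CHECK) is started
 * when no device has failed; check_state_run waits for it to complete;
 * check_state_check_result either marks the stripe in-sync or, on a
 * mismatch, bumps resync_mismatches and (unless MD_RECOVERY_CHECK)
 * schedules a recompute of the parity block; check_state_compute_result
 * (also reached directly from idle when one device has failed) writes the
 * recomputed block back out.
 */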
2658static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2659                                struct stripe_head_state *s, int disks)
2660{
2661        struct r5dev *dev = NULL;
2662
2663        set_bit(STRIPE_HANDLE, &sh->state);
2664
2665        switch (sh->check_state) {
2666        case check_state_idle:
2667                /* start a new check operation if there are no failures */
2668                if (s->failed == 0) {
2669                        BUG_ON(s->uptodate != disks);
2670                        sh->check_state = check_state_run;
2671                        set_bit(STRIPE_OP_CHECK, &s->ops_request);
2672                        clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2673                        s->uptodate--;
2674                        break;
2675                }
2676                dev = &sh->dev[s->failed_num[0]];
2677                /* fall through */
2678        case check_state_compute_result:
2679                sh->check_state = check_state_idle;
2680                if (!dev)
2681                        dev = &sh->dev[sh->pd_idx];
2682
2683                /* check that a write has not made the stripe insync */
2684                if (test_bit(STRIPE_INSYNC, &sh->state))
2685                        break;
2686
2687                /* either failed parity check, or recovery is happening */
2688                BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2689                BUG_ON(s->uptodate != disks);
2690
2691                set_bit(R5_LOCKED, &dev->flags);
2692                s->locked++;
2693                set_bit(R5_Wantwrite, &dev->flags);
2694
2695                clear_bit(STRIPE_DEGRADED, &sh->state);
2696                set_bit(STRIPE_INSYNC, &sh->state);
2697                break;
2698        case check_state_run:
2699                break; /* we will be called again upon completion */
2700        case check_state_check_result:
2701                sh->check_state = check_state_idle;
2702
2703                /* if a failure occurred during the check operation, leave
2704                 * STRIPE_INSYNC not set and let the stripe be handled again
2705                 */
2706                if (s->failed)
2707                        break;
2708
2709                /* handle a successful check operation, if parity is correct
2710                 * we are done.  Otherwise update the mismatch count and repair
2711                 * parity if !MD_RECOVERY_CHECK
2712                 */
2713                if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
2714                        /* parity is correct (on disc,
2715                         * not in buffer any more)
2716                         */
2717                        set_bit(STRIPE_INSYNC, &sh->state);
2718                else {
2719                        conf->mddev->resync_mismatches += STRIPE_SECTORS;
2720                        if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2721                                /* don't try to repair!! */
2722                                set_bit(STRIPE_INSYNC, &sh->state);
2723                        else {
2724                                sh->check_state = check_state_compute_run;
2725                                set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2726                                set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2727                                set_bit(R5_Wantcompute,
2728                                        &sh->dev[sh->pd_idx].flags);
2729                                sh->ops.target = sh->pd_idx;
2730                                sh->ops.target2 = -1;
2731                                s->uptodate++;
2732                        }
2733                }
2734                break;
2735        case check_state_compute_run:
2736                break;
2737        default:
2738                printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2739                       __func__, sh->check_state,
2740                       (unsigned long long) sh->sector);
2741                BUG();
2742        }
2743}
2744
2745
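/*
 * handle_parity_checks6 - the RAID6 counterpart of handle_parity_checks5;
 * both P and Q may need checking and repair.  The extra states
 * check_state_run_q and check_state_run_pq cover checking Q alone or P and
 * Q together; on a mismatch the offending block(s) are recomputed via
 * sh->ops.target/target2 and written back.
 */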
2746static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2747                                  struct stripe_head_state *s,
2748                                  int disks)
2749{
2750        int pd_idx = sh->pd_idx;
2751        int qd_idx = sh->qd_idx;
2752        struct r5dev *dev;
2753
2754        set_bit(STRIPE_HANDLE, &sh->state);
2755
2756        BUG_ON(s->failed > 2);
2757
2758        /* Want to check and possibly repair P and Q.
2759         * However there could be one 'failed' device, in which
2760         * case we can only check one of them, possibly using the
2761         * other to generate missing data
2762         */
2763
2764        switch (sh->check_state) {
2765        case check_state_idle:
2766                /* start a new check operation if there are < 2 failures */
2767                if (s->failed == s->q_failed) {
2768                        /* The only possible failed device holds Q, so it
2769                         * makes sense to check P (if anything else had failed,
2770                         * we would have used P to recreate it).
2771                         */
2772                        sh->check_state = check_state_run;
2773                }
2774                if (!s->q_failed && s->failed < 2) {
2775                        /* Q is not failed, and we didn't use it to generate
2776                         * anything, so it makes sense to check it
2777                         */
2778                        if (sh->check_state == check_state_run)
2779                                sh->check_state = check_state_run_pq;
2780                        else
2781                                sh->check_state = check_state_run_q;
2782                }
2783
2784                /* discard potentially stale zero_sum_result */
2785                sh->ops.zero_sum_result = 0;
2786
2787                if (sh->check_state == check_state_run) {
2788                        /* async_xor_zero_sum destroys the contents of P */
2789                        clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2790                        s->uptodate--;
2791                }
2792                if (sh->check_state >= check_state_run &&
2793                    sh->check_state <= check_state_run_pq) {
2794                        /* async_syndrome_zero_sum preserves P and Q, so
2795                         * no need to mark them !uptodate here
2796                         */
2797                        set_bit(STRIPE_OP_CHECK, &s->ops_request);
2798                        break;
2799                }
2800
2801                /* we have 2-disk failure */
2802                BUG_ON(s->failed != 2);
2803                /* fall through */
2804        case check_state_compute_result:
2805                sh->check_state = check_state_idle;
2806
2807                /* check that a write has not made the stripe insync */
2808                if (test_bit(STRIPE_INSYNC, &sh->state))
2809                        break;
2810
2811                /* now write out any block on a failed drive,
2812                 * or P or Q if they were recomputed
2813                 */
2814                BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
2815                if (s->failed == 2) {
2816                        dev = &sh->dev[s->failed_num[1]];
2817                        s->locked++;
2818                        set_bit(R5_LOCKED, &dev->flags);
2819                        set_bit(R5_Wantwrite, &dev->flags);
2820                }
2821                if (s->failed >= 1) {
2822                        dev = &sh->dev[s->failed_num[0]];
2823                        s->locked++;
2824                        set_bit(R5_LOCKED, &dev->flags);
2825                        set_bit(R5_Wantwrite, &dev->flags);
2826                }
2827                if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2828                        dev = &sh->dev[pd_idx];
2829                        s->locked++;
2830                        set_bit(R5_LOCKED, &dev->flags);
2831                        set_bit(R5_Wantwrite, &dev->flags);
2832                }
2833                if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2834                        dev = &sh->dev[qd_idx];
2835                        s->locked++;
2836                        set_bit(R5_LOCKED, &dev->flags);
2837                        set_bit(R5_Wantwrite, &dev->flags);
2838                }
2839                clear_bit(STRIPE_DEGRADED, &sh->state);
2840
2841                set_bit(STRIPE_INSYNC, &sh->state);
2842                break;
2843        case check_state_run:
2844        case check_state_run_q:
2845        case check_state_run_pq:
2846                break; /* we will be called again upon completion */
2847        case check_state_check_result:
2848                sh->check_state = check_state_idle;
2849
2850                /* handle a successful check operation, if parity is correct
2851                 * we are done.  Otherwise update the mismatch count and repair
2852                 * parity if !MD_RECOVERY_CHECK
2853                 */
2854                if (sh->ops.zero_sum_result == 0) {
2855                        /* both parities are correct */
2856                        if (!s->failed)
2857                                set_bit(STRIPE_INSYNC, &sh->state);
2858                        else {
2859                                /* in contrast to the raid5 case we can validate
2860                                 * parity, but still have a failure to write
2861                                 * back
2862                                 */
2863                                sh->check_state = check_state_compute_result;
2864                                /* Returning at this point means that we may go
2865                                 * off and bring p and/or q uptodate again so
2866                                 * we make sure to check zero_sum_result again
2867                                 * to verify if p or q need writeback
2868                                 */
2869                        }
2870                } else {
2871                        conf->mddev->resync_mismatches += STRIPE_SECTORS;
2872                        if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2873                                /* don't try to repair!! */
2874                                set_bit(STRIPE_INSYNC, &sh->state);
2875                        else {
2876                                int *target = &sh->ops.target;
2877
2878                                sh->ops.target = -1;
2879                                sh->ops.target2 = -1;
2880                                sh->check_state = check_state_compute_run;
2881                                set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2882                                set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2883                                if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2884                                        set_bit(R5_Wantcompute,
2885                                                &sh->dev[pd_idx].flags);
2886                                        *target = pd_idx;
2887                                        target = &sh->ops.target2;
2888                                        s->uptodate++;
2889                                }
2890                                if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2891                                        set_bit(R5_Wantcompute,
2892                                                &sh->dev[qd_idx].flags);
2893                                        *target = qd_idx;
2894                                        s->uptodate++;
2895                                }
2896                        }
2897                }
2898                break;
2899        case check_state_compute_run:
2900                break;
2901        default:
2902                printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2903                       __func__, sh->check_state,
2904                       (unsigned long long) sh->sector);
2905                BUG();
2906        }
2907}
2908
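/*
 * handle_stripe_expansion - copy data blocks from a fully-read expand-source
 * stripe into the matching destination stripes of the new layout using
 * async_memcpy on a single channel.  Each copied block is marked R5_Expanded
 * and R5_UPTODATE; once every data block of a destination stripe has been
 * copied, that stripe is flagged STRIPE_EXPAND_READY for handling.
 */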
2909static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh)
2910{
2911        int i;
2912
2913        /* We have read all the blocks in this stripe and now we need to
2914         * copy some of them into a target stripe for expand.
2915         */
2916        struct dma_async_tx_descriptor *tx = NULL;
2917        clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2918        for (i = 0; i < sh->disks; i++)
2919                if (i != sh->pd_idx && i != sh->qd_idx) {
2920                        int dd_idx, j;
2921                        struct stripe_head *sh2;
2922                        struct async_submit_ctl submit;
2923
2924                        sector_t bn = compute_blocknr(sh, i, 1);
2925                        sector_t s = raid5_compute_sector(conf, bn, 0,
2926                                                          &dd_idx, NULL);
2927                        sh2 = get_active_stripe(conf, s, 0, 1, 1);
2928                        if (sh2 == NULL)
2929                                /* so far only the early blocks of this stripe
2930                                 * have been requested.  When later blocks
2931                                 * get requested, we will try again
2932                                 */
2933                                continue;
2934                        if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2935                           test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2936                                /* must have already done this block */
2937                                release_stripe(sh2);
2938                                continue;
2939                        }
2940
2941                        /* place all the copies on one channel */
2942                        init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
2943                        tx = async_memcpy(sh2->dev[dd_idx].page,
2944                                          sh->dev[i].page, 0, 0, STRIPE_SIZE,
2945                                          &submit);
2946
2947                        set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2948                        set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2949                        for (j = 0; j < conf->raid_disks; j++)
2950                                if (j != sh2->pd_idx &&
2951                                    j != sh2->qd_idx &&
2952                                    !test_bit(R5_Expanded, &sh2->dev[j].flags))
2953                                        break;
2954                        if (j == conf->raid_disks) {
2955                                set_bit(STRIPE_EXPAND_READY, &sh2->state);
2956                                set_bit(STRIPE_HANDLE, &sh2->state);
2957                        }
2958                        release_stripe(sh2);
2959
2960                }
2961        /* done submitting copies, wait for them to complete */
2962        if (tx) {
2963                async_tx_ack(tx);
2964                dma_wait_for_async_tx(tx);
2965        }
2966}
2967
2968
2969/*
2970 * handle_stripe - do things to a stripe.
2971 *
2972 * We lock the stripe and then examine the state of various bits
2973 * to see what needs to be done.
2974 * Possible results:
2975 *    return some read requests which now have data
2976 *    return some write requests which are safely on disc
2977 *    schedule a read on some buffers
2978 *    schedule a write of some buffers
2979 *    return confirmation of parity correctness
2980 *
2981 * bios are taken off the toread or towrite lists, and the affected stripe
2982 * buffers get R5_LOCKED set before STRIPE_ACTIVE is cleared.
2983 *
2984 */
2985
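/*
 * analyse_stripe - summarise the state of every device in the stripe into
 * *s (counts of locked, uptodate, to_read, to_write, failed, ... buffers,
 * plus any blocked or bad-block rdevs) under rcu_read_lock and device_lock.
 * handle_stripe then decides what to do based on this summary.
 */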
2986static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
2987{
2988        raid5_conf_t *conf = sh->raid_conf;
2989        int disks = sh->disks;
2990        struct r5dev *dev;
2991        int i;
2992
2993        memset(s, 0, sizeof(*s));
2994
2995        s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
2996        s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2997        s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
2998        s->failed_num[0] = -1;
2999        s->failed_num[1] = -1;
3000
3001        /* Now to look around and see what can be done */
3002        rcu_read_lock();
3003        spin_lock_irq(&conf->device_lock);
3004        for (i=disks; i--; ) {
3005                mdk_rdev_t *rdev;
3006                sector_t first_bad;
3007                int bad_sectors;
3008                int is_bad = 0;
3009
3010                dev = &sh->dev[i];
3011
3012                pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3013                        i, dev->flags, dev->toread, dev->towrite, dev->written);
3014                /* maybe we can reply to a read
3015                 *
3016                 * new wantfill requests are only permitted while
3017                 * ops_complete_biofill is guaranteed to be inactive
3018                 */
3019                if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3020                    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3021                        set_bit(R5_Wantfill, &dev->flags);
3022
3023                /* now count some things */
3024                if (test_bit(R5_LOCKED, &dev->flags))
3025                        s->locked++;
3026                if (test_bit(R5_UPTODATE, &dev->flags))
3027                        s->uptodate++;
3028                if (test_bit(R5_Wantcompute, &dev->flags)) {
3029                        s->compute++;
3030                        BUG_ON(s->compute > 2);
3031                }
3032
3033                if (test_bit(R5_Wantfill, &dev->flags))
3034                        s->to_fill++;
3035                else if (dev->toread)
3036                        s->to_read++;
3037                if (dev->towrite) {
3038                        s->to_write++;
3039                        if (!test_bit(R5_OVERWRITE, &dev->flags))
3040                                s->non_overwrite++;
3041                }
3042                if (dev->written)
3043                        s->written++;
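                /* A negative return from is_badblock indicates bad blocks
                 * that have not yet been acknowledged in the metadata, so
                 * the rdev is treated as Blocked until they have been.
                 */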
3044                rdev = rcu_dereference(conf->disks[i].rdev);
3045                if (rdev) {
3046                        is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3047                                             &first_bad, &bad_sectors);
3048                        if (s->blocked_rdev == NULL
3049                            && (test_bit(Blocked, &rdev->flags)
3050                                || is_bad < 0)) {
3051                                if (is_bad < 0)
3052                                        set_bit(BlockedBadBlocks,
3053                                                &rdev->flags);
3054                                s->blocked_rdev = rdev;
3055                                atomic_inc(&rdev->nr_pending);
3056                        }
3057                }
3058                clear_bit(R5_Insync, &dev->flags);
3059                if (!rdev)
3060                        /* Not in-sync */;
3061                else if (is_bad) {
3062                        /* also not in-sync */
3063                        if (!test_bit(WriteErrorSeen, &rdev->flags)) {
3064                                /* treat as in-sync, but with a read error
3065                                 * which we can now try to correct
3066                                 */
3067                                set_bit(R5_Insync, &dev->flags);
3068                                set_bit(R5_ReadError, &dev->flags);
3069                        }
3070                } else if (test_bit(In_sync, &rdev->flags))
3071                        set_bit(R5_Insync, &dev->flags);
3072                else {
3073                        /* in sync if before recovery_offset */
3074                        if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3075                                set_bit(R5_Insync, &dev->flags);
3076                }
3077                if (test_bit(R5_WriteError, &dev->flags)) {
3078                        clear_bit(R5_Insync, &dev->flags);
3079                        if (!test_bit(Faulty, &rdev->flags)) {
3080                                s->handle_bad_blocks = 1;
3081                                atomic_inc(&rdev->nr_pending);
3082                        } else
3083                                clear_bit(R5_WriteError, &dev->flags);
3084                }
3085                if (test_bit(R5_MadeGood, &dev->flags)) {
3086                        if (!test_bit(Faulty, &rdev->flags)) {
3087                                s->handle_bad_blocks = 1;
3088                                atomic_inc(&rdev->nr_pending);
3089                        } else
3090                                clear_bit(R5_MadeGood, &dev->flags);
3091                }
3092                if (!test_bit(R5_Insync, &dev->flags)) {
3093                        /* The ReadError flag will just be confusing now */
3094                        clear_bit(R5_ReadError, &dev->flags);
3095                        clear_bit(R5_ReWrite, &dev->flags);
3096                }
3097                if (test_bit(R5_ReadError, &dev->flags))
3098                        clear_bit(R5_Insync, &dev->flags);
3099                if (!test_bit(R5_Insync, &dev->flags)) {
3100                        if (s->failed < 2)
3101                                s->failed_num[s->failed] = i;
3102                        s->failed++;
3103                }
3104        }
3105        spin_unlock_irq(&conf->device_lock);
3106        rcu_read_unlock();
3107}
3108
3109static void handle_stripe(struct stripe_head *sh)
3110{
3111        struct stripe_head_state s;
3112        raid5_conf_t *conf = sh->raid_conf;
3113        int i;
3114        int prexor;
3115        int disks = sh->disks;
3116        struct r5dev *pdev, *qdev;
3117
3118        clear_bit(STRIPE_HANDLE, &sh->state);
3119        if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
3120                /* already being handled, ensure it gets handled
3121                 * again when current action finishes */
3122                set_bit(STRIPE_HANDLE, &sh->state);
3123                return;
3124        }
3125
3126        if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3127                set_bit(STRIPE_SYNCING, &sh->state);
3128                clear_bit(STRIPE_INSYNC, &sh->state);
3129        }
3130        clear_bit(STRIPE_DELAYED, &sh->state);
3131
3132        pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3133                "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3134               (unsigned long long)sh->sector, sh->state,
3135               atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
3136               sh->check_state, sh->reconstruct_state);
3137
3138        analyse_stripe(sh, &s);
3139
3140        if (s.handle_bad_blocks) {
3141                set_bit(STRIPE_HANDLE, &sh->state);
3142                goto finish;
3143        }
3144
3145        if (unlikely(s.blocked_rdev)) {
3146                if (s.syncing || s.expanding || s.expanded ||
3147                    s.to_write || s.written) {
3148                        set_bit(STRIPE_HANDLE, &sh->state);
3149                        goto finish;
3150                }
3151                /* There is nothing for the blocked_rdev to block */
3152                rdev_dec_pending(s.blocked_rdev, conf->mddev);
3153                s.blocked_rdev = NULL;
3154        }
3155
3156        if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3157                set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3158                set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3159        }
3160
3161        pr_debug("locked=%d uptodate=%d to_read=%d"
3162               " to_write=%d failed=%d failed_num=%d,%d\n",
3163               s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3164               s.failed_num[0], s.failed_num[1]);
3165        /* check if the array has lost more than max_degraded devices and,
3166         * if so, some requests might need to be failed.
3167         */
3168        if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
3169                handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3170        if (s.failed > conf->max_degraded && s.syncing)
3171                handle_failed_sync(conf, sh, &s);
3172
3173        /*
3174         * might be able to return some write requests if the parity blocks
3175         * are safe, or on a failed drive
3176         */
3177        pdev = &sh->dev[sh->pd_idx];
3178        s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
3179                || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
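        /* For RAID4/5 there is no Q block, so q_failed is forced true
         * below and the qdev conditions in the clean-event test pass
         * trivially.
         */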
3180        qdev = &sh->dev[sh->qd_idx];
3181        s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
3182                || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
3183                || conf->level < 6;
3184
3185        if (s.written &&
3186            (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3187                             && !test_bit(R5_LOCKED, &pdev->flags)
3188                             && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3189            (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3190                             && !test_bit(R5_LOCKED, &qdev->flags)
3191                             && test_bit(R5_UPTODATE, &qdev->flags)))))
3192                handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3193
3194        /* Now we might consider reading some blocks, either to check/generate
3195         * parity, or to satisfy requests
3196         * or to load a block that is being partially written.
3197         */
3198        if (s.to_read || s.non_overwrite
3199            || (conf->level == 6 && s.to_write && s.failed)
3200            || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3201                handle_stripe_fill(sh, &s, disks);
3202
3203        /* Now we check to see if any write operations have recently
3204         * completed
3205         */
3206        prexor = 0;
3207        if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3208                prexor = 1;
3209        if (sh->reconstruct_state == reconstruct_state_drain_result ||
3210            sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3211                sh->reconstruct_state = reconstruct_state_idle;
3212
3213                /* All the 'written' buffers and the parity block are ready to
3214                 * be written back to disk
3215                 */
3216                BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3217                BUG_ON(sh->qd_idx >= 0 &&
3218                       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
3219                for (i = disks; i--; ) {
3220                        struct r5dev *dev = &sh->dev[i];
3221                        if (test_bit(R5_LOCKED, &dev->flags) &&
3222                                (i == sh->pd_idx || i == sh->qd_idx ||
3223                                 dev->written)) {
3224                                pr_debug("Writing block %d\n", i);
3225                                set_bit(R5_Wantwrite, &dev->flags);
3226                                if (prexor)
3227                                        continue;
3228                                if (!test_bit(R5_Insync, &dev->flags) ||
3229                                    ((i == sh->pd_idx || i == sh->qd_idx)  &&
3230                                     s.failed == 0))
3231                                        set_bit(STRIPE_INSYNC, &sh->state);
3232                        }
3233                }
3234                if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3235                        s.dec_preread_active = 1;
3236        }
3237
3238        /* Now to consider new write requests and what else, if anything,
3239         * should be read.  We do not handle new writes when:
3240         * 1/ A 'write' operation (copy+xor) is already in flight.
3241         * 2/ A 'check' operation is in flight, as it may clobber the parity
3242         *    block.
3243         */
3244        if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3245                handle_stripe_dirtying(conf, sh, &s, disks);
3246
3247        /* maybe we need to check and possibly fix the parity for this stripe
3248         * Any reads will already have been scheduled, so we just see if enough
3249         * data is available.  The parity check is held off while parity
3250         * dependent operations are in flight.
3251         */
3252        if (sh->check_state ||
3253            (s.syncing && s.locked == 0 &&
3254             !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3255             !test_bit(STRIPE_INSYNC, &sh->state))) {
3256                if (conf->level == 6)
3257                        handle_parity_checks6(conf, sh, &s, disks);
3258                else
3259                        handle_parity_checks5(conf, sh, &s, disks);
3260        }
3261
3262        if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3263                md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3264                clear_bit(STRIPE_SYNCING, &sh->state);
3265        }
3266
3267        /* If the failed drives are just a ReadError, then we might need
3268         * to progress the repair/check process
3269         */
3270        if (s.failed <= conf->max_degraded && !conf->mddev->ro)
3271                for (i = 0; i < s.failed; i++) {
3272                        struct r5dev *dev = &sh->dev[s.failed_num[i]];
3273                        if (test_bit(R5_ReadError, &dev->flags)
3274                            && !test_bit(R5_LOCKED, &dev->flags)
3275                            && test_bit(R5_UPTODATE, &dev->flags)
3276                                ) {
3277                                if (!test_bit(R5_ReWrite, &dev->flags)) {
3278                                        set_bit(R5_Wantwrite, &dev->flags);
3279                                        set_bit(R5_ReWrite, &dev->flags);
3280                                        set_bit(R5_LOCKED, &dev->flags);
3281                                        s.locked++;
3282                                } else {
3283                                        /* let's read it back */
3284                                        set_bit(R5_Wantread, &dev->flags);
3285                                        set_bit(R5_LOCKED, &dev->flags);
3286                                        s.locked++;
3287                                }
3288                        }
3289                }
3290
3291
3292        /* Finish reconstruct operations initiated by the expansion process */
3293        if (sh->reconstruct_state == reconstruct_state_result) {
3294                struct stripe_head *sh_src
3295                        = get_active_stripe(conf, sh->sector, 1, 1, 1);
3296                if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
3297                        /* sh cannot be written until sh_src has been read,
3298                         * so arrange for sh to be delayed a little
3299                         */
3300                        set_bit(STRIPE_DELAYED, &sh->state);
3301                        set_bit(STRIPE_HANDLE, &sh->state);
3302                        if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3303                                              &sh_src->state))
3304                                atomic_inc(&conf->preread_active_stripes);
3305                        release_stripe(sh_src);
3306                        goto finish;
3307                }
3308                if (sh_src)
3309                        release_stripe(sh_src);
3310
3311                sh->reconstruct_state = reconstruct_state_idle;
3312                clear_bit(STRIPE_EXPANDING, &sh->state);
3313                for (i = conf->raid_disks; i--; ) {
3314                        set_bit(R5_Wantwrite, &sh->dev[i].flags);
3315                        set_bit(R5_LOCKED, &sh->dev[i].flags);
3316                        s.locked++;
3317                }
3318        }
3319
3320        if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3321            !sh->reconstruct_state) {
3322                /* Need to write out all blocks after computing parity */
3323                sh->disks = conf->raid_disks;
3324                stripe_set_idx(sh->sector, conf, 0, sh);
3325                schedule_reconstruction(sh, &s, 1, 1);
3326        } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3327                clear_bit(STRIPE_EXPAND_READY, &sh->state);
3328                atomic_dec(&conf->reshape_stripes);
3329                wake_up(&conf->wait_for_overlap);
3330                md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3331        }
3332
3333        if (s.expanding && s.locked == 0 &&
3334            !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3335                handle_stripe_expansion(conf, sh);
3336
3337finish:
3338        /* wait for this device to become unblocked */
3339        if (conf->mddev->external && unlikely(s.blocked_rdev))
3340                md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3341
3342        if (s.handle_bad_blocks)
3343                for (i = disks; i--; ) {
3344                        mdk_rdev_t *rdev;
3345                        struct r5dev *dev = &sh->dev[i];
3346                        if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
3347                                /* We own a safe reference to the rdev */
3348                                rdev = conf->disks[i].rdev;
3349                                if (!rdev_set_badblocks(rdev, sh->sector,
3350                                                        STRIPE_SECTORS, 0))
3351                                        md_error(conf->mddev, rdev);
3352                                rdev_dec_pending(rdev, conf->mddev);
3353                        }
3354                        if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
3355                                rdev = conf->disks[i].rdev;
3356                                rdev_clear_badblocks(rdev, sh->sector,
3357                                                     STRIPE_SECTORS);
3358                                rdev_dec_pending(rdev, conf->mddev);
3359                        }
3360                }
3361
3362        if (s.ops_request)
3363                raid_run_ops(sh, s.ops_request);
3364
3365        ops_run_io(sh, &s);
3366
3367        if (s.dec_preread_active) {
3368                /* We delay this until after ops_run_io so that if make_request
3369                 * is waiting on a flush, it won't continue until the writes
3370                 * have actually been submitted.
3371                 */
3372                atomic_dec(&conf->preread_active_stripes);
3373                if (atomic_read(&conf->preread_active_stripes) <
3374                    IO_THRESHOLD)
3375                        md_wakeup_thread(conf->mddev->thread);
3376        }
3377
3378        return_io(s.return_bi);
3379
3380        clear_bit(STRIPE_ACTIVE, &sh->state);
3381}
3382
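/*
 * raid5_activate_delayed - once preread activity has dropped below
 * IO_THRESHOLD, move stripes that were marked STRIPE_DELAYED onto the
 * hold_list, setting STRIPE_PREREAD_ACTIVE so that their pre-reads can
 * now be issued.
 */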
3383static void raid5_activate_delayed(raid5_conf_t *conf)
3384{
3385        if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3386                while (!list_empty(&conf->delayed_list)) {
3387                        struct list_head *l = conf->delayed_list.next;
3388                        struct stripe_head *sh;
3389                        sh = list_entry(l, struct stripe_head, lru);
3390                        list_del_init(l);
3391                        clear_bit(STRIPE_DELAYED, &sh->state);
3392                        if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3393                                atomic_inc(&conf->preread_active_stripes);
3394                        list_add_tail(&sh->lru, &conf->hold_list);
3395                }
3396        }
3397}
3398
3399static void activate_bit_delay(raid5_conf_t *conf)
3400{
3401        /* device_lock is held */
3402        struct list_head head;
3403        list_add(&head, &conf->bitmap_list);
3404        list_del_init(&conf->bitmap_list);
3405        while (!list_empty(&head)) {
3406                struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3407                list_del_init(&sh->lru);
3408                atomic_inc(&sh->count);
3409                __release_stripe(conf, sh);
3410        }
3411}
3412
3413int md_raid5_congested(mddev_t *mddev, int bits)
3414{
3415        raid5_conf_t *conf = mddev->private;
3416
3417        /* No difference between reads and writes.  Just check
3418         * how busy the stripe_cache is
3419         */
3420
3421        if (conf->inactive_blocked)
3422                return 1;
3423        if (conf->quiesce)
3424                return 1;
3425        if (list_empty_careful(&conf->inactive_list))
3426                return 1;
3427
3428        return 0;
3429}
3430EXPORT_SYMBOL_GPL(md_raid5_congested);
3431
3432static int raid5_congested(void *data, int bits)
3433{
3434        mddev_t *mddev = data;
3435
3436        return mddev_congested(mddev, bits) ||
3437                md_raid5_congested(mddev, bits);
3438}
3439
3440/* We want read requests to align with chunks where possible,
3441 * but write requests don't need to.
3442 */
3443static int raid5_mergeable_bvec(struct request_queue *q,
3444                                struct bvec_merge_data *bvm,
3445                                struct bio_vec *biovec)
3446{
3447        mddev_t *mddev = q->queuedata;
3448        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3449        int max;
3450        unsigned int chunk_sectors = mddev->chunk_sectors;
3451        unsigned int bio_sectors = bvm->bi_size >> 9;
3452
3453        if ((bvm->bi_rw & 1) == WRITE)
3454                return biovec->bv_len; /* always allow writes to be mergeable */
3455
3456        if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3457                chunk_sectors = mddev->new_chunk_sectors;
3458        max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3459        if (max < 0) max = 0;
3460        if (max <= biovec->bv_len && bio_sectors == 0)
3461                return biovec->bv_len;
3462        else
3463                return max;
3464}
3465
3466
3467static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3468{
3469        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3470        unsigned int chunk_sectors = mddev->chunk_sectors;
3471        unsigned int bio_sectors = bio->bi_size >> 9;
3472
3473        if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3474                chunk_sectors = mddev->new_chunk_sectors;
3475        return  chunk_sectors >=
3476                ((sector & (chunk_sectors - 1)) + bio_sectors);
3477}
3478
3479/*
3480 *  add bio to the retry LIFO (in O(1) ... we are in interrupt context),
3481 *  later sampled by raid5d.
3482 */
3483static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
3484{
3485        unsigned long flags;
3486
3487        spin_lock_irqsave(&conf->device_lock, flags);
3488
3489        bi->bi_next = conf->retry_read_aligned_list;
3490        conf->retry_read_aligned_list = bi;
3491
3492        spin_unlock_irqrestore(&conf->device_lock, flags);
3493        md_wakeup_thread(conf->mddev->thread);
3494}
3495
3496
3497static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3498{
3499        struct bio *bi;
3500
3501        bi = conf->retry_read_aligned;
3502        if (bi) {
3503                conf->retry_read_aligned = NULL;
3504                return bi;
3505        }
3506        bi = conf->retry_read_aligned_list;
3507        if(bi) {
3508                conf->retry_read_aligned_list = bi->bi_next;
3509                bi->bi_next = NULL;
3510                /*
3511                 * this sets the active stripe count to 1 and the processed
3512                 * stripe count to zero (upper 8 bits)
3513                 */
3514                bi->bi_phys_segments = 1; /* biased count of active stripes */
3515        }
3516
3517        return bi;
3518}
3519
3520
3521/*
3522 *  The "raid5_align_endio" should check if the read succeeded and if it
3523 *  did, call bio_endio on the original bio (having bio_put the new bio
3524 *  first).
3525 *  If the read failed, the original bio is queued for a retry via add_bio_to_retry().
3526 */
3527static void raid5_align_endio(struct bio *bi, int error)
3528{
3529        struct bio* raid_bi  = bi->bi_private;
3530        mddev_t *mddev;
3531        raid5_conf_t *conf;
3532        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3533        mdk_rdev_t *rdev;
3534
3535        bio_put(bi);
3536
3537        rdev = (void*)raid_bi->bi_next;
3538        raid_bi->bi_next = NULL;
3539        mddev = rdev->mddev;
3540        conf = mddev->private;
3541
3542        rdev_dec_pending(rdev, conf->mddev);
3543
3544        if (!error && uptodate) {
3545                bio_endio(raid_bi, 0);
3546                if (atomic_dec_and_test(&conf->active_aligned_reads))
3547                        wake_up(&conf->wait_for_stripe);
3548                return;
3549        }
3550
3551
3552        pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3553
3554        add_bio_to_retry(raid_bi, conf);
3555}
3556
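/*
 * bio_fits_rdev - return 1 only if the bio can be sent to the member device
 * unchanged: it must fit within queue_max_sectors and queue_max_segments,
 * and the queue must not have a merge_bvec_fn (applying one at this stage
 * is too hard, so we give up).
 */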
3557static int bio_fits_rdev(struct bio *bi)
3558{
3559        struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3560
3561        if ((bi->bi_size>>9) > queue_max_sectors(q))
3562                return 0;
3563        blk_recount_segments(q, bi);
3564        if (bi->bi_phys_segments > queue_max_segments(q))
3565                return 0;
3566
3567        if (q->merge_bvec_fn)
3568                /* it's too hard to apply the merge_bvec_fn at this stage,
3569                 * just give up
3570                 */
3571                return 0;
3572
3573        return 1;
3574}
3575
3576
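/*
 * chunk_aligned_read - fast path for reads that fit entirely within one
 * chunk: clone the bio, map it straight to the member disk with
 * raid5_compute_sector and submit it, bypassing the stripe cache.
 * Returns 1 if the bio was dispatched this way, 0 if the caller must fall
 * back to the normal stripe-cache path.
 */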
3577static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
3578{
3579        raid5_conf_t *conf = mddev->private;
3580        int dd_idx;
3581        struct bio* align_bi;
3582        mdk_rdev_t *rdev;
3583
3584        if (!in_chunk_boundary(mddev, raid_bio)) {
3585                pr_debug("chunk_aligned_read : non aligned\n");
3586                return 0;
3587        }
3588        /*
3589         * use bio_clone_mddev to make a copy of the bio
3590         */
3591        align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
3592        if (!align_bi)
3593                return 0;
3594        /*
3595         *   set bi_end_io to a new function, and set bi_private to the
3596         *     original bio.
3597         */
3598        align_bi->bi_end_io  = raid5_align_endio;
3599        align_bi->bi_private = raid_bio;
3600        /*
3601         *      compute position
3602         */
3603        align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
3604                                                    0,
3605                                                    &dd_idx, NULL);
3606
3607        rcu_read_lock();
3608        rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3609        if (rdev && test_bit(In_sync, &rdev->flags)) {
3610                sector_t first_bad;
3611                int bad_sectors;
3612
3613                atomic_inc(&rdev->nr_pending);
3614                rcu_read_unlock();
3615                raid_bio->bi_next = (void*)rdev;
3616                align_bi->bi_bdev =  rdev->bdev;
3617                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3618                align_bi->bi_sector += rdev->data_offset;
3619
3620                if (!bio_fits_rdev(align_bi) ||
3621                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
3622                                &first_bad, &bad_sectors)) {
3623                        /* too big in some way, or has a known bad block */
3624                        bio_put(align_bi);
3625                        rdev_dec_pending(rdev, mddev);
3626                        return 0;
3627                }
3628
3629                spin_lock_irq(&conf->device_lock);
3630                wait_event_lock_irq(conf->wait_for_stripe,
3631                                    conf->quiesce == 0,
3632                                    conf->device_lock, /* nothing */);
3633                atomic_inc(&conf->active_aligned_reads);
3634                spin_unlock_irq(&conf->device_lock);
3635
3636                generic_make_request(align_bi);
3637                return 1;
3638        } else {
3639                rcu_read_unlock();
3640                bio_put(align_bi);
3641                return 0;
3642        }
3643}
3644
3645/* __get_priority_stripe - get the next stripe to process
3646 *
3647 * Full stripe writes are allowed to pass preread active stripes up until
3648 * the bypass_threshold is exceeded.  In general the bypass_count
3649 * increments when the handle_list is handled before the hold_list; however, it
3650 * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
3651 * stripe with in-flight i/o.  The bypass_count will be reset when the
3652 * head of the hold_list has changed, i.e. the head was promoted to the
3653 * handle_list.
3654 */
3655static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3656{
3657        struct stripe_head *sh;
3658
3659        pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3660                  __func__,
3661                  list_empty(&conf->handle_list) ? "empty" : "busy",
3662                  list_empty(&conf->hold_list) ? "empty" : "busy",
3663                  atomic_read(&conf->pending_full_writes), conf->bypass_count);
3664
3665        if (!list_empty(&conf->handle_list)) {
3666                sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3667
3668                if (list_empty(&conf->hold_list))
3669                        conf->bypass_count = 0;
3670                else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3671                        if (conf->hold_list.next == conf->last_hold)
3672                                conf->bypass_count++;
3673                        else {
3674                                conf->last_hold = conf->hold_list.next;
3675                                conf->bypass_count -= conf->bypass_threshold;
3676                                if (conf->bypass_count < 0)
3677                                        conf->bypass_count = 0;
3678                        }
3679                }
3680        } else if (!list_empty(&conf->hold_list) &&
3681                   ((conf->bypass_threshold &&
3682                     conf->bypass_count > conf->bypass_threshold) ||
3683                    atomic_read(&conf->pending_full_writes) == 0)) {
3684                sh = list_entry(conf->hold_list.next,
3685                                typeof(*sh), lru);
3686                conf->bypass_count -= conf->bypass_threshold;
3687                if (conf->bypass_count < 0)
3688                        conf->bypass_count = 0;
3689        } else
3690                return NULL;
3691
3692        list_del_init(&sh->lru);
3693        atomic_inc(&sh->count);
3694        BUG_ON(atomic_read(&sh->count) != 1);
3695        return sh;
3696}
3697
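/*
 * make_request - entry point for incoming bios.  REQ_FLUSH requests are
 * handed to md_flush_request and suitably aligned reads may be served by
 * chunk_aligned_read.  Everything else is split into STRIPE_SECTORS-sized
 * pieces, each attached to its stripe via add_stripe_bio (re-checking
 * reshape_progress under device_lock while a reshape is in progress);
 * completion of the original bio is tracked through its bi_phys_segments
 * count.
 */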
3698static int make_request(mddev_t *mddev, struct bio * bi)
3699{
3700        raid5_conf_t *conf = mddev->private;
3701        int dd_idx;
3702        sector_t new_sector;
3703        sector_t logical_sector, last_sector;
3704        struct stripe_head *sh;
3705        const int rw = bio_data_dir(bi);
3706        int remaining;
3707        int plugged;
3708
3709        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
3710                md_flush_request(mddev, bi);
3711                return 0;
3712        }
3713
3714        md_write_start(mddev, bi);
3715
3716        if (rw == READ &&
3717             mddev->reshape_position == MaxSector &&
3718             chunk_aligned_read(mddev,bi))
3719                return 0;
3720
3721        logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3722        last_sector = bi->bi_sector + (bi->bi_size>>9);
3723        bi->bi_next = NULL;
3724        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
3725
3726        plugged = mddev_check_plugged(mddev);
3727        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3728                DEFINE_WAIT(w);
3729                int disks, data_disks;
3730                int previous;
3731
3732        retry:
3733                previous = 0;
3734                disks = conf->raid_disks;
3735                prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
3736                if (unlikely(conf->reshape_progress != MaxSector)) {
3737                        /* spinlock is needed as reshape_progress may be
3738                         * 64bit on a 32bit platform, and so it might be
3739                         * possible to see a half-updated value
3740                         * Of course reshape_progress could change after
3741                         * the lock is dropped, so once we get a reference
3742                         * to the stripe that we think it is, we will have
3743                         * to check again.
3744                         */
3745                        spin_lock_irq(&conf->device_lock);
3746                        if (mddev->delta_disks < 0
3747                            ? logical_sector < conf->reshape_progress
3748                            : logical_sector >= conf->reshape_progress) {
3749                                disks = conf->previous_raid_disks;
3750                                previous = 1;
3751                        } else {
3752                                if (mddev->delta_disks < 0
3753                                    ? logical_sector < conf->reshape_safe
3754                                    : logical_sector >= conf->reshape_safe) {
3755                                        spin_unlock_irq(&conf->device_lock);
3756                                        schedule();
3757                                        goto retry;
3758                                }
3759                        }
3760                        spin_unlock_irq(&conf->device_lock);
3761                }
3762                data_disks = disks - conf->max_degraded;
3763
3764                new_sector = raid5_compute_sector(conf, logical_sector,
3765                                                  previous,
3766                                                  &dd_idx, NULL);
3767                pr_debug("raid456: make_request, sector %llu logical %llu\n",
3768                        (unsigned long long)new_sector, 
3769                        (unsigned long long)logical_sector);
3770
3771                sh = get_active_stripe(conf, new_sector, previous,
3772                                       (bi->bi_rw&RWA_MASK), 0);
3773                if (sh) {
3774                        if (unlikely(previous)) {
3775                                /* expansion might have moved on while waiting for a
3776                                 * stripe, so we must do the range check again.
3777                                 * Expansion could still move past after this
3778                                 * test, but as we are holding a reference to
3779                                 * 'sh', we know that if that happens,
3780                                 *  STRIPE_EXPANDING will get set and the expansion
3781                                 * won't proceed until we finish with the stripe.
3782                                 */
3783                                int must_retry = 0;
3784                                spin_lock_irq(&conf->device_lock);
3785                                if (mddev->delta_disks < 0
3786                                    ? logical_sector >= conf->reshape_progress
3787                                    : logical_sector < conf->reshape_progress)
3788                                        /* mismatch, need to try again */
3789                                        must_retry = 1;
3790                                spin_unlock_irq(&conf->device_lock);
3791                                if (must_retry) {
3792                                        release_stripe(sh);
3793                                        schedule();
3794                                        goto retry;
3795                                }
3796                        }
3797
3798                        if (rw == WRITE &&
3799                            logical_sector >= mddev->suspend_lo &&
3800                            logical_sector < mddev->suspend_hi) {
3801                                release_stripe(sh);
3802                                /* As the suspend_* range is controlled by
3803                                 * userspace, we want an interruptible
3804                                 * wait.
3805                                 */
3806                                flush_signals(current);
3807                                prepare_to_wait(&conf->wait_for_overlap,
3808                                                &w, TASK_INTERRUPTIBLE);
3809                                if (logical_sector >= mddev->suspend_lo &&
3810                                    logical_sector < mddev->suspend_hi)
3811                                        schedule();
3812                                goto retry;
3813                        }
3814
3815                        if (test_bit(STRIPE_EXPANDING, &sh->state) ||
3816                            !add_stripe_bio(sh, bi, dd_idx, rw)) {
3817                                /* Stripe is busy expanding or
3818                                 * add failed due to overlap.  Flush everything
3819                                 * and wait a while
3820                                 */
3821                                md_wakeup_thread(mddev->thread);
3822                                release_stripe(sh);
3823                                schedule();
3824                                goto retry;
3825                        }
3826                        finish_wait(&conf->wait_for_overlap, &w);
3827                        set_bit(STRIPE_HANDLE, &sh->state);
3828                        clear_bit(STRIPE_DELAYED, &sh->state);
3829                        if ((bi->bi_rw & REQ_SYNC) &&
3830                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3831                                atomic_inc(&conf->preread_active_stripes);
3832                        release_stripe(sh);
3833                } else {
3834                        /* cannot get stripe for read-ahead, just give up */
3835                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
3836                        finish_wait(&conf->wait_for_overlap, &w);
3837                        break;
3838                }
3839
3840        }
3841        if (!plugged)
3842                md_wakeup_thread(mddev->thread);
3843
3844        spin_lock_irq(&conf->device_lock);
3845        remaining = raid5_dec_bi_phys_segments(bi);
3846        spin_unlock_irq(&conf->device_lock);
3847        if (remaining == 0) {
3848
3849                if ( rw == WRITE )
3850                        md_write_end(mddev);
3851
3852                bio_endio(bi, 0);
3853        }
3854
3855        return 0;
3856}
3857
3858static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
3859
3860static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
3861{
3862        /* reshaping is quite different to recovery/resync so it is
3863         * handled quite separately ... here.
3864         *
3865         * On each call to sync_request, we gather one chunk worth of
3866         * destination stripes and flag them as expanding.
3867         * Then we find all the source stripes and request reads.
3868         * As the reads complete, handle_stripe will copy the data
3869         * into the destination stripe and release that stripe.
3870         */
3871        raid5_conf_t *conf = mddev->private;
3872        struct stripe_head *sh;
3873        sector_t first_sector, last_sector;
3874        int raid_disks = conf->previous_raid_disks;
3875        int data_disks = raid_disks - conf->max_degraded;
3876        int new_data_disks = conf->raid_disks - conf->max_degraded;
3877        int i;
3878        int dd_idx;
3879        sector_t writepos, readpos, safepos;
3880        sector_t stripe_addr;
3881        int reshape_sectors;
3882        struct list_head stripes;
3883
3884        if (sector_nr == 0) {
3885                /* If restarting in the middle, skip the initial sectors */
3886                if (mddev->delta_disks < 0 &&
3887                    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
3888                        sector_nr = raid5_size(mddev, 0, 0)
3889                                - conf->reshape_progress;
3890                } else if (mddev->delta_disks >= 0 &&
3891                           conf->reshape_progress > 0)
3892                        sector_nr = conf->reshape_progress;
3893                sector_div(sector_nr, new_data_disks);
3894                if (sector_nr) {
3895                        mddev->curr_resync_completed = sector_nr;
3896                        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
3897                        *skipped = 1;
3898                        return sector_nr;
3899                }
3900        }
3901
3902        /* We need to process a full chunk at a time.
3903         * If old and new chunk sizes differ, we need to process the
3904         * larger of the two
3905         */
3906        if (mddev->new_chunk_sectors > mddev->chunk_sectors)
3907                reshape_sectors = mddev->new_chunk_sectors;
3908        else
3909                reshape_sectors = mddev->chunk_sectors;
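        /* e.g. reshaping from 64k chunks (128 sectors) to 256k chunks
         * (512 sectors) handles 512 sectors per device on each pass here
         * (illustrative sizes).
         */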
3910
3911        /* we update the metadata when there is more than 3Meg
3912         * in the block range (that is rather arbitrary, should
3913         * probably be time based) or when the data about to be
3914         * copied would over-write the source of the data at
3915         * the front of the range.
3916         * i.e. when one new_stripe along from reshape_progress maps (in the
3917         * new layout) to after where reshape_safe maps in the old layout.
3918         */
3919        writepos = conf->reshape_progress;
3920        sector_div(writepos, new_data_disks);
3921        readpos = conf->reshape_progress;
3922        sector_div(readpos, data_disks);
3923        safepos = conf->reshape_safe;
3924        sector_div(safepos, data_disks);
3925        if (mddev->delta_disks < 0) {
3926                writepos -= min_t(sector_t, reshape_sectors, writepos);
3927                readpos += reshape_sectors;
3928                safepos += reshape_sectors;
3929        } else {
3930                writepos += reshape_sectors;
3931                readpos -= min_t(sector_t, reshape_sectors, readpos);
3932                safepos -= min_t(sector_t, reshape_sectors, safepos);
3933        }
3934
3935        /* 'writepos' is the most advanced device address we might write.
3936         * 'readpos' is the least advanced device address we might read.
3937         * 'safepos' is the least address recorded in the metadata as having
3938         *     been reshaped.
3939         * If 'readpos' is behind 'writepos', then there is no way that we can
3940         * ensure safety in the face of a crash - that must be done by userspace
3941         * making a backup of the data.  So in that case there is no particular
3942         * rush to update metadata.
3943         * Otherwise if 'safepos' is behind 'writepos', then we really need to
3944         * update the metadata to advance 'safepos' to match 'readpos' so that
3945         * we can be safe in the event of a crash.
3946         * So we insist on updating metadata if safepos is behind writepos and
3947         * readpos is beyond writepos.
3948         * In any case, update the metadata every 10 seconds.
3949         * Maybe that number should be configurable, but I'm not sure it is
3950         * worth it.... maybe it could be a multiple of safemode_delay???
3951         */
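        /* Worked example with illustrative numbers: growing from 4 to 5
         * data disks with reshape_sectors == 128, a reshape_progress of
         * 10240 gives writepos = 10240/5 + 128 = 2176 and
         * readpos = 10240/4 - 128 = 2432.  If reshape_safe is 8960 then
         * safepos = 8960/4 - 128 = 2112, so safepos < writepos while
         * readpos > writepos, and the checkpoint below is taken before any
         * further stripes are scheduled.
         */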
3952        if ((mddev->delta_disks < 0
3953             ? (safepos > writepos && readpos < writepos)
3954             : (safepos < writepos && readpos > writepos)) ||
3955            time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
3956                /* Cannot proceed until we've updated the superblock... */
3957                wait_event(conf->wait_for_overlap,
3958                           atomic_read(&conf->reshape_stripes)==0);
3959                mddev->reshape_position = conf->reshape_progress;
3960                mddev->curr_resync_completed = sector_nr;
3961                conf->reshape_checkpoint = jiffies;
3962                set_bit(MD_CHANGE_DEVS, &mddev->flags);
3963                md_wakeup_thread(mddev->thread);
3964                wait_event(mddev->sb_wait, mddev->flags == 0 ||
3965                           kthread_should_stop());
3966                spin_lock_irq(&conf->device_lock);
3967                conf->reshape_safe = mddev->reshape_position;
3968                spin_unlock_irq(&conf->device_lock);
3969                wake_up(&conf->wait_for_overlap);
3970                sysfs_notify(&mddev->kobj, NULL, "sync_completed");
3971        }
3972
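        /* When shrinking, the reshape works backwards from the end of the
         * array, so the destination address is 'writepos'; when growing it
         * works forwards and is simply sector_nr.
         */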
3973        if (mddev->delta_disks < 0) {
3974                BUG_ON(conf->reshape_progress == 0);
3975                stripe_addr = writepos;
3976                BUG_ON((mddev->dev_sectors &
3977                        ~((sector_t)reshape_sectors - 1))
3978                       - reshape_sectors - stripe_addr
3979                       != sector_nr);
3980        } else {
3981                BUG_ON(writepos != sector_nr + reshape_sectors);
3982                stripe_addr = sector_nr;
3983        }
3984        INIT_LIST_HEAD(&stripes);
3985        for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
3986                int j;
3987                int skipped_disk = 0;
3988                sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
3989                set_bit(STRIPE_EXPANDING, &sh->state);
3990                atomic_inc(&conf->reshape_stripes);
3991                /* If any of this stripe is beyond the end of the old
3992                 * array, then we need to zero those blocks
3993                 */
3994                for (j=sh->disks; j--;) {
3995                        sector_t s;
3996                        if (j == sh->pd_idx)
3997                                continue;
3998                        if (conf->level == 6 &&
3999                            j == sh->qd_idx)
4000                                continue;
4001                        s = compute_blocknr(sh, j, 0);
4002                        if (s < raid5_size(mddev, 0, 0)) {
4003                                skipped_disk = 1;
4004                                continue;
4005                        }
4006                        memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4007                        set_bit(R5_Expanded, &sh->dev[j].flags);
4008                        set_bit(R5_UPTODATE, &sh->dev[j].flags);
4009                }
4010                if (!skipped_disk) {
4011                        set_bit(STRIPE_EXPAND_READY, &sh->state);
4012                        set_bit(STRIPE_HANDLE, &sh->state);
4013                }
4014                list_add(&sh->lru, &stripes);
4015        }
4016        spin_lock_irq(&conf->device_lock);
4017        if (mddev->delta_disks < 0)
4018                conf->reshape_progress -= reshape_sectors * new_data_disks;
4019        else
4020                conf->reshape_progress += reshape_sectors * new_data_disks;
4021        spin_unlock_irq(&conf->device_lock);
4022        /* Ok, those stripes are ready. We can start scheduling
4023         * reads on the source stripes.
4024         * The source stripes are determined by mapping the first and last
4025         * block on the destination stripes.
4026         */
4027        first_sector =
4028                raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4029                                     1, &dd_idx, NULL);
4030        last_sector =
4031                raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4032                                            * new_data_disks - 1),
4033                                     1, &dd_idx, NULL);
4034        if (last_sector >= mddev->dev_sectors)
4035                last_sector = mddev->dev_sectors - 1;
4036        while (first_sector <= last_sector) {
4037                sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4038                set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4039                set_bit(STRIPE_HANDLE, &sh->state);
4040                release_stripe(sh);
4041                first_sector += STRIPE_SECTORS;
4042        }
4043        /* Now that the sources are clearly marked, we can release
4044         * the destination stripes
4045         */
4046        while (!list_empty(&stripes)) {
4047                sh = list_entry(stripes.next, struct stripe_head, lru);
4048                list_del_init(&sh->lru);
4049                release_stripe(sh);
4050        }
4051        /* If this takes us to the resync_max point where we have to pause,
4052         * then we need to write out the superblock.
4053         */
4054        sector_nr += reshape_sectors;
4055        if ((sector_nr - mddev->curr_resync_completed) * 2
4056            >= mddev->resync_max - mddev->curr_resync_completed) {
4057                /* Cannot proceed until we've updated the superblock... */
4058                wait_event(conf->wait_for_overlap,
4059                           atomic_read(&conf->reshape_stripes) == 0);
4060                mddev->reshape_position = conf->reshape_progress;
4061                mddev->curr_resync_completed = sector_nr;
4062                conf->reshape_checkpoint = jiffies;
4063                set_bit(MD_CHANGE_DEVS, &mddev->flags);
4064                md_wakeup_thread(mddev->thread);
4065                wait_event(mddev->sb_wait,
4066                           !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4067                           || kthread_should_stop());
4068                spin_lock_irq(&conf->device_lock);
4069                conf->reshape_safe = mddev->reshape_position;
4070                spin_unlock_irq(&conf->device_lock);
4071                wake_up(&conf->wait_for_overlap);
4072                sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4073        }
4074        return reshape_sectors;
4075}
4076
4077/* FIXME go_faster isn't used */
4078static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
4079{
4080        raid5_conf_t *conf = mddev->private;
4081        struct stripe_head *sh;
4082        sector_t max_sector = mddev->dev_sectors;
4083        sector_t sync_blocks;
4084        int still_degraded = 0;
4085        int i;
4086
4087        if (sector_nr >= max_sector) {
4088                /* just being told to finish up .. nothing much to do */
4089
4090                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4091                        end_reshape(conf);
4092                        return 0;
4093                }
4094
4095                if (mddev->curr_resync < max_sector) /* aborted */
4096                        bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4097                                        &sync_blocks, 1);
4098                else /* completed sync */
4099                        conf->fullsync = 0;
4100                bitmap_close_sync(mddev->bitmap);
4101
4102                return 0;
4103        }
4104
4105        /* Allow raid5_quiesce to complete */
4106        wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4107
4108        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4109                return reshape_request(mddev, sector_nr, skipped);
4110
4111        /* No need to check resync_max as we never do more than one
4112         * stripe, and as resync_max will always be on a chunk boundary,
4113         * if the check in md_do_sync didn't fire, there is no chance
4114         * of overstepping resync_max here
4115         */
4116
4117        /* if there are too many failed drives and we are trying
4118         * to resync, then assert that we are finished, because there is
4119         * nothing we can do.
4120         */
4121        if (mddev->degraded >= conf->max_degraded &&
4122            test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4123                sector_t rv = mddev->dev_sectors - sector_nr;
4124                *skipped = 1;
4125                return rv;
4126        }
4127        if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4128            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4129            !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4130                /* we can skip this block, and probably more */
4131                sync_blocks /= STRIPE_SECTORS;
4132                *skipped = 1;
4133                return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
4134        }
4135
4136
4137        bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4138
4139        sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4140        if (sh == NULL) {
4141                sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4142                /* make sure we don't swamp the stripe cache if someone else
4143                 * is trying to get access
4144                 */
4145                schedule_timeout_uninterruptible(1);
4146        }
4147        /* Need to check if array will still be degraded after recovery/resync
4148         * We don't need to check the 'failed' flag as when that gets set,
4149         * recovery aborts.
4150         */
4151        for (i = 0; i < conf->raid_disks; i++)
4152                if (conf->disks[i].rdev == NULL)
4153                        still_degraded = 1;
4154
4155        bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4156
4157        set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
4158
4159        handle_stripe(sh);
4160        release_stripe(sh);
4161
4162        return STRIPE_SECTORS;
4163}
4164
4165static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4166{
4167        /* We may not be able to submit a whole bio at once as there
4168         * may not be enough stripe_heads available.
4169         * We cannot pre-allocate enough stripe_heads as we may need
4170         * more than exist in the cache (if we allow ever larger chunks).
4171         * So we do one stripe head at a time and record in
4172         * ->bi_hw_segments how many have been done.
4173         *
4174         * We *know* that this entire raid_bio is in one chunk, so
4175         * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector.
4176         */
4177        struct stripe_head *sh;
4178        int dd_idx;
4179        sector_t sector, logical_sector, last_sector;
4180        int scnt = 0;
4181        int remaining;
4182        int handled = 0;
4183
4184        logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4185        sector = raid5_compute_sector(conf, logical_sector,
4186                                      0, &dd_idx, NULL);
4187        last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4188
4189        for (; logical_sector < last_sector;
4190             logical_sector += STRIPE_SECTORS,
4191                     sector += STRIPE_SECTORS,
4192                     scnt++) {
4193
4194                if (scnt < raid5_bi_hw_segments(raid_bio))
4195                        /* already done this stripe */
4196                        continue;
4197
4198                sh = get_active_stripe(conf, sector, 0, 1, 0);
4199
4200                if (!sh) {
4201                        /* failed to get a stripe - must wait */
4202                        raid5_set_bi_hw_segments(raid_bio, scnt);
4203                        conf->retry_read_aligned = raid_bio;
4204                        return handled;
4205                }
4206
4207                set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
4208                if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4209                        release_stripe(sh);
4210                        raid5_set_bi_hw_segments(raid_bio, scnt);
4211                        conf->retry_read_aligned = raid_bio;
4212                        return handled;
4213                }
4214
4215                handle_stripe(sh);
4216                release_stripe(sh);
4217                handled++;
4218        }
4219        spin_lock_irq(&conf->device_lock);
4220        remaining = raid5_dec_bi_phys_segments(raid_bio);
4221        spin_unlock_irq(&conf->device_lock);
4222        if (remaining == 0)
4223                bio_endio(raid_bio, 0);
4224        if (atomic_dec_and_test(&conf->active_aligned_reads))
4225                wake_up(&conf->wait_for_stripe);
4226        return handled;
4227}
4228
4229
4230/*
4231 * This is our raid5 kernel thread.
4232 *
4233 * We scan the hash table for stripes which can be handled now.
4234 * During the scan, completed stripes are saved for us by the interrupt
4235 * handler, so that they will not have to wait for our next wakeup.
4236 */
4237static void raid5d(mddev_t *mddev)
4238{
4239        struct stripe_head *sh;
4240        raid5_conf_t *conf = mddev->private;
4241        int handled;
4242        struct blk_plug plug;
4243
4244        pr_debug("+++ raid5d active\n");
4245
4246        md_check_recovery(mddev);
4247
4248        blk_start_plug(&plug);
4249        handled = 0;
4250        spin_lock_irq(&conf->device_lock);
4251        while (1) {
4252                struct bio *bio;
4253
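                /* While the array is unplugged, first flush any batched
                 * bitmap updates and re-activate delayed stripes, then retry
                 * queued aligned reads before handling ordinary stripes.
                 */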
4254                if (atomic_read(&mddev->plug_cnt) == 0 &&
4255                    !list_empty(&conf->bitmap_list)) {
4256                        /* Now is a good time to flush some bitmap updates */
4257                        conf->seq_flush++;
4258                        spin_unlock_irq(&conf->device_lock);
4259                        bitmap_unplug(mddev->bitmap);
4260                        spin_lock_irq(&conf->device_lock);
4261                        conf->seq_write = conf->seq_flush;
4262                        activate_bit_delay(conf);
4263                }
4264                if (atomic_read(&mddev->plug_cnt) == 0)
4265                        raid5_activate_delayed(conf);
4266
4267                while ((bio = remove_bio_from_retry(conf))) {
4268                        int ok;
4269                        spin_unlock_irq(&conf->device_lock);
4270                        ok = retry_aligned_read(conf, bio);
4271                        spin_lock_irq(&conf->device_lock);
4272                        if (!ok)
4273                                break;
4274                        handled++;
4275                }
4276
4277                sh = __get_priority_stripe(conf);
4278
4279                if (!sh)
4280                        break;
4281                spin_unlock_irq(&conf->device_lock);
4282
4283                handled++;
4284                handle_stripe(sh);
4285                release_stripe(sh);
4286                cond_resched();
4287
4288                if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
4289                        md_check_recovery(mddev);
4290
4291                spin_lock_irq(&conf->device_lock);
4292        }
4293        pr_debug("%d stripes handled\n", handled);
4294
4295        spin_unlock_irq(&conf->device_lock);
4296
4297        async_tx_issue_pending_all();
4298        blk_finish_plug(&plug);
4299
4300        pr_debug("--- raid5d inactive\n");
4301}
4302
4303static ssize_t
4304raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4305{
4306        raid5_conf_t *conf = mddev->private;
4307        if (conf)
4308                return sprintf(page, "%d\n", conf->max_nr_stripes);
4309        else
4310                return 0;
4311}
4312
4313int
4314raid5_set_cache_size(mddev_t *mddev, int size)
4315{
4316        raid5_conf_t *conf = mddev->private;
4317        int err;
4318
4319        if (size <= 16 || size > 32768)
4320                return -EINVAL;
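        /* Shrink first by dropping idle stripes, then grow towards the new
         * size; either loop stops early if a stripe cannot be freed or a
         * new one cannot be allocated.
         */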
4321        while (size < conf->max_nr_stripes) {
4322                if (drop_one_stripe(conf))
4323                        conf->max_nr_stripes--;
4324                else
4325                        break;
4326        }
4327        err = md_allow_write(mddev);
4328        if (err)
4329                return err;
4330        while (size > conf->max_nr_stripes) {
4331                if (grow_one_stripe(conf))
4332                        conf->max_nr_stripes++;
4333                else break;
4334        }
4335        return 0;
4336}
4337EXPORT_SYMBOL(raid5_set_cache_size);
4338
4339static ssize_t
4340raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
4341{
4342        raid5_conf_t *conf = mddev->private;
4343        unsigned long new;
4344        int err;
4345
4346        if (len >= PAGE_SIZE)
4347                return -EINVAL;
4348        if (!conf)
4349                return -ENODEV;
4350
4351        if (strict_strtoul(page, 10, &new))
4352                return -EINVAL;
4353        err = raid5_set_cache_size(mddev, new);
4354        if (err)
4355                return err;
4356        return len;
4357}
4358
4359static struct md_sysfs_entry
4360raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4361                                raid5_show_stripe_cache_size,
4362                                raid5_store_stripe_cache_size);
4363
4364static ssize_t
4365raid5_show_preread_threshold(mddev_t *mddev, char *page)
4366{
4367        raid5_conf_t *conf = mddev->private;
4368        if (conf)
4369                return sprintf(page, "%d\n", conf->bypass_threshold);
4370        else
4371                return 0;
4372}
4373
4374static ssize_t
4375raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4376{
4377        raid5_conf_t *conf = mddev->private;
4378        unsigned long new;
4379        if (len >= PAGE_SIZE)
4380                return -EINVAL;
4381        if (!conf)
4382                return -ENODEV;
4383
4384        if (strict_strtoul(page, 10, &new))
4385                return -EINVAL;
4386        if (new > conf->max_nr_stripes)
4387                return -EINVAL;
4388        conf->bypass_threshold = new;
4389        return len;
4390}
4391
4392static struct md_sysfs_entry
4393raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4394                                        S_IRUGO | S_IWUSR,
4395                                        raid5_show_preread_threshold,
4396                                        raid5_store_preread_threshold);
4397
4398static ssize_t
4399stripe_cache_active_show(mddev_t *mddev, char *page)
4400{
4401        raid5_conf_t *conf = mddev->private;
4402        if (conf)
4403                return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4404        else
4405                return 0;
4406}
4407
4408static struct md_sysfs_entry
4409raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4410
4411static struct attribute *raid5_attrs[] =  {
4412        &raid5_stripecache_size.attr,
4413        &raid5_stripecache_active.attr,
4414        &raid5_preread_bypass_threshold.attr,
4415        NULL,
4416};
4417static struct attribute_group raid5_attrs_group = {
4418        .name = NULL,
4419        .attrs = raid5_attrs,
4420};
4421
4422static sector_t
4423raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4424{
4425        raid5_conf_t *conf = mddev->private;
4426
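        /* Capacity exported by the array: the per-device sectors rounded
         * down to a whole number of both old and new chunks, multiplied by
         * the number of data disks.
         */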
4427        if (!sectors)
4428                sectors = mddev->dev_sectors;
4429        if (!raid_disks)
4430                /* size is defined by the smaller of the previous and new sizes */
4431                raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4432
4433        sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4434        sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4435        return sectors * (raid_disks - conf->max_degraded);
4436}
4437
4438static void raid5_free_percpu(raid5_conf_t *conf)
4439{
4440        struct raid5_percpu *percpu;
4441        unsigned long cpu;
4442
4443        if (!conf->percpu)
4444                return;
4445
4446        get_online_cpus();
4447        for_each_possible_cpu(cpu) {
4448                percpu = per_cpu_ptr(conf->percpu, cpu);
4449                safe_put_page(percpu->spare_page);
4450                kfree(percpu->scribble);
4451        }
4452#ifdef CONFIG_HOTPLUG_CPU
4453        unregister_cpu_notifier(&conf->cpu_notify);
4454#endif
4455        put_online_cpus();
4456
4457        free_percpu(conf->percpu);
4458}
4459
4460static void free_conf(raid5_conf_t *conf)
4461{
4462        shrink_stripes(conf);
4463        raid5_free_percpu(conf);
4464        kfree(conf->disks);
4465        kfree(conf->stripe_hashtbl);
4466        kfree(conf);
4467}
4468
4469#ifdef CONFIG_HOTPLUG_CPU
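/* CPU hotplug callback: allocate the per-cpu spare page (RAID6 only) and
 * scribble buffer when a CPU is brought up, and free them when it is
 * taken down.
 */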
4470static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4471                              void *hcpu)
4472{
4473        raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
4474        long cpu = (long)hcpu;
4475        struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4476
4477        switch (action) {
4478        case CPU_UP_PREPARE:
4479        case CPU_UP_PREPARE_FROZEN:
4480                if (conf->level == 6 && !percpu->spare_page)
4481                        percpu->spare_page = alloc_page(GFP_KERNEL);
4482                if (!percpu->scribble)
4483                        percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4484
4485                if (!percpu->scribble ||
4486                    (conf->level == 6 && !percpu->spare_page)) {
4487                        safe_put_page(percpu->spare_page);
4488                        kfree(percpu->scribble);
4489                        pr_err("%s: failed memory allocation for cpu%ld\n",
4490                               __func__, cpu);
4491                        return notifier_from_errno(-ENOMEM);
4492                }
4493                break;
4494        case CPU_DEAD:
4495        case CPU_DEAD_FROZEN:
4496                safe_put_page(percpu->spare_page);
4497                kfree(percpu->scribble);
4498                percpu->spare_page = NULL;
4499                percpu->scribble = NULL;
4500                break;
4501        default:
4502                break;
4503        }
4504        return NOTIFY_OK;
4505}
4506#endif
4507
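/* Allocate the per-cpu state used by stripe handling: scratch space for
 * building source lists for the async XOR/PQ operations on every present
 * CPU, plus a spare page used by the RAID6 paths.
 */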
4508static int raid5_alloc_percpu(raid5_conf_t *conf)
4509{
4510        unsigned long cpu;
4511        struct page *spare_page;
4512        struct raid5_percpu __percpu *allcpus;
4513        void *scribble;
4514        int err;
4515
4516        allcpus = alloc_percpu(struct raid5_percpu);
4517        if (!allcpus)
4518                return -ENOMEM;
4519        conf->percpu = allcpus;
4520
4521        get_online_cpus();
4522        err = 0;
4523        for_each_present_cpu(cpu) {
4524                if (conf->level == 6) {
4525                        spare_page = alloc_page(GFP_KERNEL);
4526                        if (!spare_page) {
4527                                err = -ENOMEM;
4528                                break;
4529                        }
4530                        per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4531                }
4532                scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4533                if (!scribble) {
4534                        err = -ENOMEM;
4535                        break;
4536                }
4537                per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4538        }
4539#ifdef CONFIG_HOTPLUG_CPU
4540        conf->cpu_notify.notifier_call = raid456_cpu_notify;
4541        conf->cpu_notify.priority = 0;
4542        if (err == 0)
4543                err = register_cpu_notifier(&conf->cpu_notify);
4544#endif
4545        put_online_cpus();
4546
4547        return err;
4548}
4549
4550static raid5_conf_t *setup_conf(mddev_t *mddev)
4551{
4552        raid5_conf_t *conf;
4553        int raid_disk, memory, max_disks;
4554        mdk_rdev_t *rdev;
4555        struct disk_info *disk;
4556
4557        if (mddev->new_level != 5
4558            && mddev->new_level != 4
4559            && mddev->new_level != 6) {
4560                printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
4561                       mdname(mddev), mddev->new_level);
4562                return ERR_PTR(-EIO);
4563        }
4564        if ((mddev->new_level == 5
4565             && !algorithm_valid_raid5(mddev->new_layout)) ||
4566            (mddev->new_level == 6
4567             && !algorithm_valid_raid6(mddev->new_layout))) {
4568                printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
4569                       mdname(mddev), mddev->new_layout);
4570                return ERR_PTR(-EIO);
4571        }
4572        if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4573                printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
4574                       mdname(mddev), mddev->raid_disks);
4575                return ERR_PTR(-EINVAL);
4576        }
4577
4578        if (!mddev->new_chunk_sectors ||
4579            (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4580            !is_power_of_2(mddev->new_chunk_sectors)) {
4581                printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
4582                       mdname(mddev), mddev->new_chunk_sectors << 9);
4583                return ERR_PTR(-EINVAL);
4584        }
4585
4586        conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4587        if (conf == NULL)
4588                goto abort;
4589        spin_lock_init(&conf->device_lock);
4590        init_waitqueue_head(&conf->wait_for_stripe);
4591        init_waitqueue_head(&conf->wait_for_overlap);
4592        INIT_LIST_HEAD(&conf->handle_list);
4593        INIT_LIST_HEAD(&conf->hold_list);
4594        INIT_LIST_HEAD(&conf->delayed_list);
4595        INIT_LIST_HEAD(&conf->bitmap_list);
4596        INIT_LIST_HEAD(&conf->inactive_list);
4597        atomic_set(&conf->active_stripes, 0);
4598        atomic_set(&conf->preread_active_stripes, 0);
4599        atomic_set(&conf->active_aligned_reads, 0);
4600        conf->bypass_threshold = BYPASS_THRESHOLD;
4601
4602        conf->raid_disks = mddev->raid_disks;
4603        if (mddev->reshape_position == MaxSector)
4604                conf->previous_raid_disks = mddev->raid_disks;
4605        else
4606                conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4607        max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4608        conf->scribble_len = scribble_len(max_disks);
4609
4610        conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4611                              GFP_KERNEL);
4612        if (!conf->disks)
4613                goto abort;
4614
4615        conf->mddev = mddev;
4616
4617        if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4618                goto abort;
4619
4620        conf->level = mddev->new_level;
4621        if (raid5_alloc_percpu(conf) != 0)
4622                goto abort;
4623
4624        pr_debug("raid456: run(%s) called.\n", mdname(mddev));
4625
4626        list_for_each_entry(rdev, &mddev->disks, same_set) {
4627                raid_disk = rdev->raid_disk;
4628                if (raid_disk >= max_disks
4629                    || raid_disk < 0)
4630                        continue;
4631                disk = conf->disks + raid_disk;
4632
4633                disk->rdev = rdev;
4634
4635                if (test_bit(In_sync, &rdev->flags)) {
4636                        char b[BDEVNAME_SIZE];
4637                        printk(KERN_INFO "md/raid:%s: device %s operational as raid"
4638                               " disk %d\n",
4639                               mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4640                } else if (rdev->saved_raid_disk != raid_disk)
4641                        /* Cannot rely on bitmap to complete recovery */
4642                        conf->fullsync = 1;
4643        }
4644
4645        conf->chunk_sectors = mddev->new_chunk_sectors;
4646        conf->level = mddev->new_level;
4647        if (conf->level == 6)
4648                conf->max_degraded = 2;
4649        else
4650                conf->max_degraded = 1;
4651        conf->algorithm = mddev->new_layout;
4652        conf->max_nr_stripes = NR_STRIPES;
4653        conf->reshape_progress = mddev->reshape_position;
4654        if (conf->reshape_progress != MaxSector) {
4655                conf->prev_chunk_sectors = mddev->chunk_sectors;
4656                conf->prev_algo = mddev->layout;
4657        }
4658
4659        memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4660                 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
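        /* With the default 256 stripes and, say, 5 devices on 4k pages this
         * works out to roughly 5-6MB of stripe cache (illustrative figures).
         */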
4661        if (grow_stripes(conf, conf->max_nr_stripes)) {
4662                printk(KERN_ERR
4663                       "md/raid:%s: couldn't allocate %dkB for buffers\n",
4664                       mdname(mddev), memory);
4665                goto abort;
4666        } else
4667                printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
4668                       mdname(mddev), memory);
4669
4670        conf->thread = md_register_thread(raid5d, mddev, NULL);
4671        if (!conf->thread) {
4672                printk(KERN_ERR
4673                       "md/raid:%s: couldn't allocate thread.\n",
4674                       mdname(mddev));
4675                goto abort;
4676        }
4677
4678        return conf;
4679
4680 abort:
4681        if (conf) {
4682                free_conf(conf);
4683                return ERR_PTR(-EIO);
4684        } else
4685                return ERR_PTR(-ENOMEM);
4686}
4687
4688
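/* Return 1 if, with layout 'algo', device 'raid_disk' holds nothing but
 * parity blocks, so an out-of-date copy of it cannot make user data dirty.
 */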
4689static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
4690{
4691        switch (algo) {
4692        case ALGORITHM_PARITY_0:
4693                if (raid_disk < max_degraded)
4694                        return 1;
4695                break;
4696        case ALGORITHM_PARITY_N:
4697                if (raid_disk >= raid_disks - max_degraded)
4698                        return 1;
4699                break;
4700        case ALGORITHM_PARITY_0_6:
4701                if (raid_disk == 0 ||
4702                    raid_disk == raid_disks - 1)
4703                        return 1;
4704                break;
4705        case ALGORITHM_LEFT_ASYMMETRIC_6:
4706        case ALGORITHM_RIGHT_ASYMMETRIC_6:
4707        case ALGORITHM_LEFT_SYMMETRIC_6:
4708        case ALGORITHM_RIGHT_SYMMETRIC_6:
4709                if (raid_disk == raid_disks - 1)
4710                        return 1;
4711        }
4712        return 0;
4713}
4714
4715static int run(mddev_t *mddev)
4716{
4717        raid5_conf_t *conf;
4718        int working_disks = 0;
4719        int dirty_parity_disks = 0;
4720        mdk_rdev_t *rdev;
4721        sector_t reshape_offset = 0;
4722
4723        if (mddev->recovery_cp != MaxSector)
4724                printk(KERN_NOTICE "md/raid:%s: not clean"
4725                       " -- starting background reconstruction\n",
4726                       mdname(mddev));
4727        if (mddev->reshape_position != MaxSector) {
4728                /* Check that we can continue the reshape.
4729                 * Currently only disks can change, it must
4730                 * increase, and we must be past the point where
4731                 * a stripe over-writes itself
4732                 */
4733                sector_t here_new, here_old;
4734                int old_disks;
4735                int max_degraded = (mddev->level == 6 ? 2 : 1);
4736
4737                if (mddev->new_level != mddev->level) {
4738                        printk(KERN_ERR "md/raid:%s: unsupported reshape "
4739                               "required - aborting.\n",
4740                               mdname(mddev));
4741                        return -EINVAL;
4742                }
4743                old_disks = mddev->raid_disks - mddev->delta_disks;
4744                /* reshape_position must be on a new-stripe boundary, and the
4745                 * stripe one further along in the new geometry must map to
4746                 * after 'here' in the old geometry.
4747                 */
4748                here_new = mddev->reshape_position;
4749                if (sector_div(here_new, mddev->new_chunk_sectors *
4750                               (mddev->raid_disks - max_degraded))) {
4751                        printk(KERN_ERR "md/raid:%s: reshape_position not "
4752                               "on a stripe boundary\n", mdname(mddev));
4753                        return -EINVAL;
4754                }
4755                reshape_offset = here_new * mddev->new_chunk_sectors;
4756                /* here_new is the stripe we will write to */
4757                here_old = mddev->reshape_position;
4758                sector_div(here_old, mddev->chunk_sectors *
4759                           (old_disks-max_degraded));
4760                /* here_old is the first stripe that we might need to read
4761                 * from */
4762                if (mddev->delta_disks == 0) {
4763                        /* We cannot be sure it is safe to start an in-place
4764                         * reshape.  It is only safe if user-space is monitoring
4765                         * and taking constant backups.
4766                         * mdadm always starts a situation like this in
4767                         * readonly mode so it can take control before
4768                         * allowing any writes.  So just check for that.
4769                         */
4770                        if ((here_new * mddev->new_chunk_sectors != 
4771                             here_old * mddev->chunk_sectors) ||
4772                            mddev->ro == 0) {
4773                                printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
4774                                       " in read-only mode - aborting\n",
4775                                       mdname(mddev));
4776                                return -EINVAL;
4777                        }
4778                } else if (mddev->delta_disks < 0
4779                    ? (here_new * mddev->new_chunk_sectors <=
4780                       here_old * mddev->chunk_sectors)
4781                    : (here_new * mddev->new_chunk_sectors >=
4782                       here_old * mddev->chunk_sectors)) {
4783                        /* Reading from the same stripe as writing to - bad */
4784                        printk(KERN_ERR "md/raid:%s: reshape_position too early for "
4785                               "auto-recovery - aborting.\n",
4786                               mdname(mddev));
4787                        return -EINVAL;
4788                }
4789                printk(KERN_INFO "md/raid:%s: reshape will continue\n",
4790                       mdname(mddev));
4791                /* OK, we should be able to continue; */
4792        } else {
4793                BUG_ON(mddev->level != mddev->new_level);
4794                BUG_ON(mddev->layout != mddev->new_layout);
4795                BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
4796                BUG_ON(mddev->delta_disks != 0);
4797        }
4798
4799        if (mddev->private == NULL)
4800                conf = setup_conf(mddev);
4801        else
4802                conf = mddev->private;
4803
4804        if (IS_ERR(conf))
4805                return PTR_ERR(conf);
4806
4807        mddev->thread = conf->thread;
4808        conf->thread = NULL;
4809        mddev->private = conf;
4810
4811        /*
4812         * 0 for a fully functional array, 1 or 2 for a degraded array.
4813         */
4814        list_for_each_entry(rdev, &mddev->disks, same_set) {
4815                if (rdev->raid_disk < 0)
4816                        continue;
4817                if (test_bit(In_sync, &rdev->flags)) {
4818                        working_disks++;
4819                        continue;
4820                }
4821                /* This disk is not fully in-sync.  However if it
4822                 * just stored parity (beyond the recovery_offset),
4823                 * then we don't need to be concerned about the
4824                 * array being dirty.
4825                 * When reshape goes 'backwards', we never have
4826                 * partially completed devices, so we only need
4827                 * to worry about reshape going forwards.
4828                 */
4829                /* Hack because v0.91 doesn't store recovery_offset properly. */
4830                if (mddev->major_version == 0 &&
4831                    mddev->minor_version > 90)
4832                        rdev->recovery_offset = reshape_offset;
4833
4834                if (rdev->recovery_offset < reshape_offset) {
4835                        /* We need to check old and new layout */
4836                        if (!only_parity(rdev->raid_disk,
4837                                         conf->algorithm,
4838                                         conf->raid_disks,
4839                                         conf->max_degraded))
4840                                continue;
4841                }
4842                if (!only_parity(rdev->raid_disk,
4843                                 conf->prev_algo,
4844                                 conf->previous_raid_disks,
4845                                 conf->max_degraded))
4846                        continue;
4847                dirty_parity_disks++;
4848        }
4849
4850        mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
4851                           - working_disks);
4852
4853        if (has_failed(conf)) {
4854                printk(KERN_ERR "md/raid:%s: not enough operational devices"
4855                        " (%d/%d failed)\n",
4856                        mdname(mddev), mddev->degraded, conf->raid_disks);
4857                goto abort;
4858        }
4859
4860        /* device size must be a multiple of chunk size */
4861        mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
4862        mddev->resync_max_sectors = mddev->dev_sectors;
4863
4864        if (mddev->degraded > dirty_parity_disks &&
4865            mddev->recovery_cp != MaxSector) {
4866                if (mddev->ok_start_degraded)
4867                        printk(KERN_WARNING
4868                               "md/raid:%s: starting dirty degraded array"
4869                               " - data corruption possible.\n",
4870                               mdname(mddev));
4871                else {
4872                        printk(KERN_ERR
4873                               "md/raid:%s: cannot start dirty degraded array.\n",
4874                               mdname(mddev));
4875                        goto abort;
4876                }
4877        }
4878
4879        if (mddev->degraded == 0)
4880                printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
4881                       " devices, algorithm %d\n", mdname(mddev), conf->level,
4882                       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4883                       mddev->new_layout);
4884        else
4885                printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
4886                       " out of %d devices, algorithm %d\n",
4887                       mdname(mddev), conf->level,
4888                       mddev->raid_disks - mddev->degraded,
4889                       mddev->raid_disks, mddev->new_layout);
4890
4891        print_raid5_conf(conf);
4892
4893        if (conf->reshape_progress != MaxSector) {
4894                conf->reshape_safe = conf->reshape_progress;
4895                atomic_set(&conf->reshape_stripes, 0);
4896                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4897                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4898                set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4899                set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4900                mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4901                                                        "reshape");
4902        }
4903
4904
4905        /* Ok, everything is just fine now */
4906        if (mddev->to_remove == &raid5_attrs_group)
4907                mddev->to_remove = NULL;
4908        else if (mddev->kobj.sd &&
4909            sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4910                printk(KERN_WARNING
4911                       "raid5: failed to create sysfs attributes for %s\n",
4912                       mdname(mddev));
4913        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
4914
4915        if (mddev->queue) {
4916                int chunk_size;
4917                /* read-ahead size must cover two whole stripes, which
4918                 * is 2 * (datadisks) * chunksize, where 'datadisks' is the
4919                 * number of data devices (raid disks minus parity disks)
4920                 */
4921                int data_disks = conf->previous_raid_disks - conf->max_degraded;
4922                int stripe = data_disks *
4923                        ((mddev->chunk_sectors << 9) / PAGE_SIZE);
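                /* e.g. 4 data disks with 512k chunks give stripe = 4 * 128
                 * pages, so read-ahead is raised to at least 1024 pages,
                 * i.e. 4MB (illustrative numbers).
                 */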
4924                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4925                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4926
4927                blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4928
4929                mddev->queue->backing_dev_info.congested_data = mddev;
4930                mddev->queue->backing_dev_info.congested_fn = raid5_congested;
4931
4932                chunk_size = mddev->chunk_sectors << 9;
4933                blk_queue_io_min(mddev->queue, chunk_size);
4934                blk_queue_io_opt(mddev->queue, chunk_size *
4935                                 (conf->raid_disks - conf->max_degraded));
4936
4937                list_for_each_entry(rdev, &mddev->disks, same_set)
4938                        disk_stack_limits(mddev->gendisk, rdev->bdev,
4939                                          rdev->data_offset << 9);
4940        }
4941
4942        return 0;
4943abort:
4944        md_unregister_thread(&mddev->thread);
4945        if (conf) {
4946                print_raid5_conf(conf);
4947                free_conf(conf);
4948        }
4949        mddev->private = NULL;
4950        printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
4951        return -EIO;
4952}
4953
4954static int stop(mddev_t *mddev)
4955{
4956        raid5_conf_t *conf = mddev->private;
4957
4958        md_unregister_thread(&mddev->thread);
4959        if (mddev->queue)
4960                mddev->queue->backing_dev_info.congested_fn = NULL;
4961        free_conf(conf);
4962        mddev->private = NULL;
4963        mddev->to_remove = &raid5_attrs_group;
4964        return 0;
4965}
4966
4967#ifdef DEBUG
4968static void print_sh(struct seq_file *seq, struct stripe_head *sh)
4969{
4970        int i;
4971
4972        seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
4973                   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
4974        seq_printf(seq, "sh %llu,  count %d.\n",
4975                   (unsigned long long)sh->sector, atomic_read(&sh->count));
4976        seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
4977        for (i = 0; i < sh->disks; i++) {
4978                seq_printf(seq, "(cache%d: %p %ld) ",
4979                           i, sh->dev[i].page, sh->dev[i].flags);
4980        }
4981        seq_printf(seq, "\n");
4982}
4983
4984static void printall(struct seq_file *seq, raid5_conf_t *conf)
4985{
4986        struct stripe_head *sh;
4987        struct hlist_node *hn;
4988        int i;
4989
4990        spin_lock_irq(&conf->device_lock);
4991        for (i = 0; i < NR_HASH; i++) {
4992                hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
4993                        if (sh->raid_conf != conf)
4994                                continue;
4995                        print_sh(seq, sh);
4996                }
4997        }
4998        spin_unlock_irq(&conf->device_lock);
4999}
5000#endif
5001
5002static void status(struct seq_file *seq, mddev_t *mddev)
5003{
5004        raid5_conf_t *conf = mddev->private;
5005        int i;
5006
5007        seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
5008                mddev->chunk_sectors / 2, mddev->layout);
5009        seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
5010        for (i = 0; i < conf->raid_disks; i++)
5011                seq_printf (seq, "%s",
5012                               conf->disks[i].rdev &&
5013                               test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
5014        seq_printf (seq, "]");
5015#ifdef DEBUG
5016        seq_printf (seq, "\n");
5017        printall(seq, conf);
5018#endif
5019}
5020
5021static void print_raid5_conf (raid5_conf_t *conf)
5022{
5023        int i;
5024        struct disk_info *tmp;
5025
5026        printk(KERN_DEBUG "RAID conf printout:\n");
5027        if (!conf) {
5028                printk("(conf==NULL)\n");
5029                return;
5030        }
5031        printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
5032               conf->raid_disks,
5033               conf->raid_disks - conf->mddev->degraded);
5034
5035        for (i = 0; i < conf->raid_disks; i++) {
5036                char b[BDEVNAME_SIZE];
5037                tmp = conf->disks + i;
5038                if (tmp->rdev)
5039                        printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
5040                               i, !test_bit(Faulty, &tmp->rdev->flags),
5041                               bdevname(tmp->rdev->bdev, b));
5042        }
5043}
5044
5045static int raid5_spare_active(mddev_t *mddev)
5046{
5047        int i;
5048        raid5_conf_t *conf = mddev->private;
5049        struct disk_info *tmp;
5050        int count = 0;
5051        unsigned long flags;
5052
5053        for (i = 0; i < conf->raid_disks; i++) {
5054                tmp = conf->disks + i;
5055                if (tmp->rdev
5056                    && tmp->rdev->recovery_offset == MaxSector
5057                    && !test_bit(Faulty, &tmp->rdev->flags)
5058                    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5059                        count++;
5060                        sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
5061                }
5062        }
5063        spin_lock_irqsave(&conf->device_lock, flags);
5064        mddev->degraded -= count;
5065        spin_unlock_irqrestore(&conf->device_lock, flags);
5066        print_raid5_conf(conf);
5067        return count;
5068}
5069
5070static int raid5_remove_disk(mddev_t *mddev, int number)
5071{
5072        raid5_conf_t *conf = mddev->private;
5073        int err = 0;
5074        mdk_rdev_t *rdev;
5075        struct disk_info *p = conf->disks + number;
5076
5077        print_raid5_conf(conf);
5078        rdev = p->rdev;
5079        if (rdev) {
5080                if (number >= conf->raid_disks &&
5081                    conf->reshape_progress == MaxSector)
5082                        clear_bit(In_sync, &rdev->flags);
5083
5084                if (test_bit(In_sync, &rdev->flags) ||
5085                    atomic_read(&rdev->nr_pending)) {
5086                        err = -EBUSY;
5087                        goto abort;
5088                }
5089                /* Only remove non-faulty devices if recovery
5090                 * isn't possible.
5091                 */
5092                if (!test_bit(Faulty, &rdev->flags) &&
5093                    mddev->recovery_disabled != conf->recovery_disabled &&
5094                    !has_failed(conf) &&
5095                    number < conf->raid_disks) {
5096                        err = -EBUSY;
5097                        goto abort;
5098                }
5099                p->rdev = NULL;
5100                synchronize_rcu();
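                /* Wait for any concurrent readers that may still be using
                 * the old rdev pointer before trusting nr_pending below.
                 */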
5101                if (atomic_read(&rdev->nr_pending)) {
5102                        /* lost the race, try later */
5103                        err = -EBUSY;
5104                        p->rdev = rdev;
5105                }
5106        }
5107abort:
5108
5109        print_raid5_conf(conf);
5110        return err;
5111}
5112
5113static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
5114{
5115        raid5_conf_t *conf = mddev->private;
5116        int err = -EEXIST;
5117        int disk;
5118        struct disk_info *p;
5119        int first = 0;
5120        int last = conf->raid_disks - 1;
5121
5122        if (mddev->recovery_disabled == conf->recovery_disabled)
5123                return -EBUSY;
5124
5125        if (has_failed(conf))
5126                /* no point adding a device */
5127                return -EINVAL;
5128
5129        if (rdev->raid_disk >= 0)
5130                first = last = rdev->raid_disk;
5131
5132        /*
5133         * find the disk ... but prefer rdev->saved_raid_disk
5134         * if possible.
5135         */
5136        if (rdev->saved_raid_disk >= 0 &&
5137            rdev->saved_raid_disk >= first &&
5138            conf->disks[rdev->saved_raid_disk].rdev == NULL)
5139                disk = rdev->saved_raid_disk;
5140        else
5141                disk = first;
5142        for ( ; disk <= last ; disk++)
5143                if ((p=conf->disks + disk)->rdev == NULL) {
5144                        clear_bit(In_sync, &rdev->flags);
5145                        rdev->raid_disk = disk;
5146                        err = 0;
5147                        if (rdev->saved_raid_disk != disk)
5148                                conf->fullsync = 1;
5149                        rcu_assign_pointer(p->rdev, rdev);
5150                        break;
5151                }
5152        print_raid5_conf(conf);
5153        return err;
5154}
5155
5156static int raid5_resize(mddev_t *mddev, sector_t sectors)
5157{
5158        /* no resync is happening, and there is enough space
5159         * on all devices, so we can resize.
5160         * We need to make sure resync covers any new space.
5161         * If the array is shrinking we should possibly wait until
5162         * any io in the removed space completes, but it hardly seems
5163         * worth it.
5164         */
5165        sectors &= ~((sector_t)mddev->chunk_sectors - 1);
5166        md_set_array_sectors(mddev, raid5_size(mddev, sectors,
5167                                               mddev->raid_disks));
5168        if (mddev->array_sectors >
5169            raid5_size(mddev, sectors, mddev->raid_disks))
5170                return -EINVAL;
5171        set_capacity(mddev->gendisk, mddev->array_sectors);
5172        revalidate_disk(mddev->gendisk);
5173        if (sectors > mddev->dev_sectors &&
5174            mddev->recovery_cp > mddev->dev_sectors) {
5175                mddev->recovery_cp = mddev->dev_sectors;
5176                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5177        }
5178        mddev->dev_sectors = sectors;
5179        mddev->resync_max_sectors = sectors;
5180        return 0;
5181}
5182
5183static int check_stripe_cache(mddev_t *mddev)
5184{
5185        /* Can only proceed if there are plenty of stripe_heads.
5186         * We need a minimum of one full stripe, and for sensible progress
5187         * it is best to have about 4 times that.
5188         * If we require 4 times, then the default 256 4K stripe_heads will
5189         * allow for chunk sizes up to 256K, which is probably OK.
5190         * If the chunk size is greater, user-space should request more
5191         * stripe_heads first.
5192         */
5193        raid5_conf_t *conf = mddev->private;
5194        if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
5195            > conf->max_nr_stripes ||
5196            ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5197            > conf->max_nr_stripes) {
5198                printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
5199                       mdname(mddev),
5200                       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5201                        / STRIPE_SIZE)*4);
5202                return 0;
5203        }
5204        return 1;
5205}
5206
5207static int check_reshape(mddev_t *mddev)
5208{
5209        raid5_conf_t *conf = mddev->private;
5210
5211        if (mddev->delta_disks == 0 &&
5212            mddev->new_layout == mddev->layout &&
5213            mddev->new_chunk_sectors == mddev->chunk_sectors)
5214                return 0; /* nothing to do */
5215        if (mddev->bitmap)
5216                /* Cannot grow a bitmap yet */
5217                return -EBUSY;
5218        if (has_failed(conf))
5219                return -EINVAL;
5220        if (mddev->delta_disks < 0) {
5221                /* We might be able to shrink, but the devices must
5222                 * be made bigger first.
5223                 * For raid6, 4 devices is the minimum;
5224                 * otherwise 2 is the minimum.
5225                 */
5226                int min = 2;
5227                if (mddev->level == 6)
5228                        min = 4;
5229                if (mddev->raid_disks + mddev->delta_disks < min)
5230                        return -EINVAL;
5231        }
5232
5233        if (!check_stripe_cache(mddev))
5234                return -ENOSPC;
5235
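            /* Reallocate the stripe cache so each stripe_head has room for the
             * post-reshape number of devices.
             */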
5236        return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
5237}
5238
5239static int raid5_start_reshape(mddev_t *mddev)
5240{
5241        raid5_conf_t *conf = mddev->private;
5242        mdk_rdev_t *rdev;
5243        int spares = 0;
5244        unsigned long flags;
5245
5246        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5247                return -EBUSY;
5248
5249        if (!check_stripe_cache(mddev))
5250                return -ENOSPC;
5251
5252        list_for_each_entry(rdev, &mddev->disks, same_set)
5253                if (!test_bit(In_sync, &rdev->flags)
5254                    && !test_bit(Faulty, &rdev->flags))
5255                        spares++;
5256
5257        if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
5258                /* Not enough devices even to make a degraded array
5259                 * of that size
5260                 */
5261                return -EINVAL;
5262
5263        /* Refuse to reduce size of the array.  Any reductions in
5264         * array size must be through explicit setting of array_size
5265         * attribute.
5266         */
5267        if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5268            < mddev->array_sectors) {
5269                printk(KERN_ERR "md/raid:%s: array size must be reduced "
5270                       "before number of disks\n", mdname(mddev));
5271                return -EINVAL;
5272        }
5273
5274        atomic_set(&conf->reshape_stripes, 0);
5275        spin_lock_irq(&conf->device_lock);
5276        conf->previous_raid_disks = conf->raid_disks;
5277        conf->raid_disks += mddev->delta_disks;
5278        conf->prev_chunk_sectors = conf->chunk_sectors;
5279        conf->chunk_sectors = mddev->new_chunk_sectors;
5280        conf->prev_algo = conf->algorithm;
5281        conf->algorithm = mddev->new_layout;
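            /* A reshape that removes devices proceeds from the end of the
             * array backwards, so it starts at the current end.
             */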
5282        if (mddev->delta_disks < 0)
5283                conf->reshape_progress = raid5_size(mddev, 0, 0);
5284        else
5285                conf->reshape_progress = 0;
5286        conf->reshape_safe = conf->reshape_progress;
5287        conf->generation++;
5288        spin_unlock_irq(&conf->device_lock);
5289
5290        /* Add some new drives, as many as will fit.
5291         * We know there are enough to make the newly sized array work.
5292         * Don't add devices if we are reducing the number of
5293         * devices in the array.  This is because it is not possible
5294         * to correctly record the "partially reconstructed" state of
5295         * such devices during the reshape and confusion could result.
5296         */
5297        if (mddev->delta_disks >= 0) {
5298                int added_devices = 0;
5299                list_for_each_entry(rdev, &mddev->disks, same_set)
5300                        if (rdev->raid_disk < 0 &&
5301                            !test_bit(Faulty, &rdev->flags)) {
5302                                if (raid5_add_disk(mddev, rdev) == 0) {
5303                                        if (rdev->raid_disk
5304                                            >= conf->previous_raid_disks) {
5305                                                set_bit(In_sync, &rdev->flags);
5306                                                added_devices++;
5307                                        } else
5308                                                rdev->recovery_offset = 0;
5309
5310                                        if (sysfs_link_rdev(mddev, rdev))
5311                                                /* Failure here is OK */;
5312                                }
5313                        } else if (rdev->raid_disk >= conf->previous_raid_disks
5314                                   && !test_bit(Faulty, &rdev->flags)) {
5315                                /* This is a spare that was manually added */
5316                                set_bit(In_sync, &rdev->flags);
5317                                added_devices++;
5318                        }
5319
5320                /* When a reshape changes the number of devices,
5321                 * ->degraded is measured against the larger of the
5322                 * pre and post number of devices.
5323                 */
5324                spin_lock_irqsave(&conf->device_lock, flags);
5325                mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5326                        - added_devices;
5327                spin_unlock_irqrestore(&conf->device_lock, flags);
5328        }
5329        mddev->raid_disks = conf->raid_disks;
5330        mddev->reshape_position = conf->reshape_progress;
5331        set_bit(MD_CHANGE_DEVS, &mddev->flags);
5332
5333        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5334        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5335        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5336        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5337        mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5338                                                "reshape");
5339        if (!mddev->sync_thread) {
5340                mddev->recovery = 0;
5341                spin_lock_irq(&conf->device_lock);
5342                mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
5343                conf->reshape_progress = MaxSector;
5344                spin_unlock_irq(&conf->device_lock);
5345                return -EAGAIN;
5346        }
5347        conf->reshape_checkpoint = jiffies;
5348        md_wakeup_thread(mddev->sync_thread);
5349        md_new_event(mddev);
5350        return 0;
5351}
5352
5353/* This is called from the reshape thread and should make any
5354 * changes needed in 'conf'
5355 */
5356static void end_reshape(raid5_conf_t *conf)
5357{
5358
5359        if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
5360
5361                spin_lock_irq(&conf->device_lock);
5362                conf->previous_raid_disks = conf->raid_disks;
5363                conf->reshape_progress = MaxSector;
5364                spin_unlock_irq(&conf->device_lock);
5365                wake_up(&conf->wait_for_overlap);
5366
5367                /* read-ahead size must cover two whole stripes, which is
5368                 * 2 * (datadisks) * chunksize, where datadisks = raid_disks - max_degraded
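                 * e.g. 4 data disks with 512K chunks: 2 * 4 * 128 = 1024 pages (4MiB).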
5369                 */
5370                if (conf->mddev->queue) {
5371                        int data_disks = conf->raid_disks - conf->max_degraded;
5372                        int stripe = data_disks * ((conf->chunk_sectors << 9)
5373                                                   / PAGE_SIZE);
5374                        if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5375                                conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5376                }
5377        }
5378}
5379
5380/* This is called from the raid5d thread with mddev_lock held.
5381 * It makes config changes to the device.
5382 */
5383static void raid5_finish_reshape(mddev_t *mddev)
5384{
5385        raid5_conf_t *conf = mddev->private;
5386
5387        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5388
5389                if (mddev->delta_disks > 0) {
5390                        md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5391                        set_capacity(mddev->gendisk, mddev->array_sectors);
5392                        revalidate_disk(mddev->gendisk);
5393                } else {
5394                        int d;
5395                        mddev->degraded = conf->raid_disks;
5396                        for (d = 0; d < conf->raid_disks ; d++)
5397                                if (conf->disks[d].rdev &&
5398                                    test_bit(In_sync,
5399                                             &conf->disks[d].rdev->flags))
5400                                        mddev->degraded--;
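                        /* delta_disks is zero or negative in this branch; detach
                         * any devices sitting in slots beyond the shrunken array.
                         */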
5401                        for (d = conf->raid_disks ;
5402                             d < conf->raid_disks - mddev->delta_disks;
5403                             d++) {
5404                                mdk_rdev_t *rdev = conf->disks[d].rdev;
5405                                if (rdev && raid5_remove_disk(mddev, d) == 0) {
5406                                        sysfs_unlink_rdev(mddev, rdev);
5407                                        rdev->raid_disk = -1;
5408                                }
5409                        }
5410                }
5411                mddev->layout = conf->algorithm;
5412                mddev->chunk_sectors = conf->chunk_sectors;
5413                mddev->reshape_position = MaxSector;
5414                mddev->delta_disks = 0;
5415        }
5416}
5417
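    /* Quiesce states: 1 stops all writes and waits until no stripes or
     * aligned reads are active, 0 re-enables writes, and 2 only wakes up
     * waiters on wait_for_overlap.
     */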
5418static void raid5_quiesce(mddev_t *mddev, int state)
5419{
5420        raid5_conf_t *conf = mddev->private;
5421
5422        switch(state) {
5423        case 2: /* resume for a suspend */
5424                wake_up(&conf->wait_for_overlap);
5425                break;
5426
5427        case 1: /* stop all writes */
5428                spin_lock_irq(&conf->device_lock);
5429                /* '2' tells resync/reshape to pause so that all
5430                 * active stripes can drain
5431                 */
5432                conf->quiesce = 2;
5433                wait_event_lock_irq(conf->wait_for_stripe,
5434                                    atomic_read(&conf->active_stripes) == 0 &&
5435                                    atomic_read(&conf->active_aligned_reads) == 0,
5436                                    conf->device_lock, /* nothing */);
5437                conf->quiesce = 1;
5438                spin_unlock_irq(&conf->device_lock);
5439                /* allow reshape to continue */
5440                wake_up(&conf->wait_for_overlap);
5441                break;
5442
5443        case 0: /* re-enable writes */
5444                spin_lock_irq(&conf->device_lock);
5445                conf->quiesce = 0;
5446                wake_up(&conf->wait_for_stripe);
5447                wake_up(&conf->wait_for_overlap);
5448                spin_unlock_irq(&conf->device_lock);
5449                break;
5450        }
5451}
5452
5453
5454static void *raid45_takeover_raid0(mddev_t *mddev, int level)
5455{
5456        struct raid0_private_data *raid0_priv = mddev->private;
5457        sector_t sectors;
5458
5459        /* for raid0 takeover only one zone is supported */
5460        if (raid0_priv->nr_strip_zones > 1) {
5461                printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
5462                       mdname(mddev));
5463                return ERR_PTR(-EINVAL);
5464        }
5465
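            /* zone_end counts sectors across every member of the zone, so the
             * per-device contribution is zone_end / nb_dev.
             */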
5466        sectors = raid0_priv->strip_zone[0].zone_end;
5467        sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
5468        mddev->dev_sectors = sectors;
5469        mddev->new_level = level;
5470        mddev->new_layout = ALGORITHM_PARITY_N;
5471        mddev->new_chunk_sectors = mddev->chunk_sectors;
5472        mddev->raid_disks += 1;
5473        mddev->delta_disks = 1;
5474        /* make sure it will not be marked as dirty */
5475        mddev->recovery_cp = MaxSector;
5476
5477        return setup_conf(mddev);
5478}
5479
5480
5481static void *raid5_takeover_raid1(mddev_t *mddev)
5482{
5483        int chunksect;
5484
5485        if (mddev->raid_disks != 2 ||
5486            mddev->degraded > 1)
5487                return ERR_PTR(-EINVAL);
5488
5489        /* Should check if there are write-behind devices? */
5490
5491        chunksect = 64*2; /* 64K by default */
5492
5493        /* The array must be an exact multiple of chunksize */
5494        while (chunksect && (mddev->array_sectors & (chunksect-1)))
5495                chunksect >>= 1;
5496
5497        if ((chunksect<<9) < STRIPE_SIZE)
5498                /* array size does not allow a suitable chunk size */
5499                return ERR_PTR(-EINVAL);
5500
5501        mddev->new_level = 5;
5502        mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5503        mddev->new_chunk_sectors = chunksect;
5504
5505        return setup_conf(mddev);
5506}
5507
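    /* Take over a raid6 array using one of the *_6 layouts (the raid5 layout
     * with Q confined to the last device): switch to the equivalent raid5
     * layout and drop that one device.
     */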
5508static void *raid5_takeover_raid6(mddev_t *mddev)
5509{
5510        int new_layout;
5511
5512        switch (mddev->layout) {
5513        case ALGORITHM_LEFT_ASYMMETRIC_6:
5514                new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5515                break;
5516        case ALGORITHM_RIGHT_ASYMMETRIC_6:
5517                new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5518                break;
5519        case ALGORITHM_LEFT_SYMMETRIC_6:
5520                new_layout = ALGORITHM_LEFT_SYMMETRIC;
5521                break;
5522        case ALGORITHM_RIGHT_SYMMETRIC_6:
5523                new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5524                break;
5525        case ALGORITHM_PARITY_0_6:
5526                new_layout = ALGORITHM_PARITY_0;
5527                break;
5528        case ALGORITHM_PARITY_N:
5529                new_layout = ALGORITHM_PARITY_N;
5530                break;
5531        default:
5532                return ERR_PTR(-EINVAL);
5533        }
5534        mddev->new_level = 5;
5535        mddev->new_layout = new_layout;
5536        mddev->delta_disks = -1;
5537        mddev->raid_disks -= 1;
5538        return setup_conf(mddev);
5539}
5540
5541
5542static int raid5_check_reshape(mddev_t *mddev)
5543{
5544        /* For a 2-drive array, the layout and chunk size can be changed
5545         * immediately, as no restriping is needed.
5546         * For larger arrays we record the new values - after validation -
5547         * to be applied by a later reshape pass.
5548         */
5549        raid5_conf_t *conf = mddev->private;
5550        int new_chunk = mddev->new_chunk_sectors;
5551
5552        if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
5553                return -EINVAL;
5554        if (new_chunk > 0) {
5555                if (!is_power_of_2(new_chunk))
5556                        return -EINVAL;
5557                if (new_chunk < (PAGE_SIZE>>9))
5558                        return -EINVAL;
5559                if (mddev->array_sectors & (new_chunk-1))
5560                        /* not a factor of the array size */
5561                        return -EINVAL;
5562        }
5563
5564        /* They look valid */
5565
5566        if (mddev->raid_disks == 2) {
5567                /* can make the change immediately */
5568                if (mddev->new_layout >= 0) {
5569                        conf->algorithm = mddev->new_layout;
5570                        mddev->layout = mddev->new_layout;
5571                }
5572                if (new_chunk > 0) {
5573                        conf->chunk_sectors = new_chunk ;
5574                        mddev->chunk_sectors = new_chunk;
5575                }
5576                set_bit(MD_CHANGE_DEVS, &mddev->flags);
5577                md_wakeup_thread(mddev->thread);
5578        }
5579        return check_reshape(mddev);
5580}
5581
5582static int raid6_check_reshape(mddev_t *mddev)
5583{
5584        int new_chunk = mddev->new_chunk_sectors;
5585
5586        if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
5587                return -EINVAL;
5588        if (new_chunk > 0) {
5589                if (!is_power_of_2(new_chunk))
5590                        return -EINVAL;
5591                if (new_chunk < (PAGE_SIZE >> 9))
5592                        return -EINVAL;
5593                if (mddev->array_sectors & (new_chunk-1))
5594                        /* not a factor of the array size */
5595                        return -EINVAL;
5596        }
5597
5598        /* They look valid */
5599        return check_reshape(mddev);
5600}
5601
5602static void *raid5_takeover(mddev_t *mddev)
5603{
5604        /* raid5 can take over:
5605         *  raid0 - if there is only one strip zone - make it a raid4 layout
5606         *  raid1 - if there are two drives.  We need to know the chunk size
5607         *  raid4 - trivial - just use a raid4 layout.
5608         *  raid6 - Providing it is a *_6 layout
5609         */
5610        if (mddev->level == 0)
5611                return raid45_takeover_raid0(mddev, 5);
5612        if (mddev->level == 1)
5613                return raid5_takeover_raid1(mddev);
5614        if (mddev->level == 4) {
5615                mddev->new_layout = ALGORITHM_PARITY_N;
5616                mddev->new_level = 5;
5617                return setup_conf(mddev);
5618        }
5619        if (mddev->level == 6)
5620                return raid5_takeover_raid6(mddev);
5621
5622        return ERR_PTR(-EINVAL);
5623}
5624
5625static void *raid4_takeover(mddev_t *mddev)
5626{
5627        /* raid4 can take over:
5628         *  raid0 - if there is only one strip zone
5629         *  raid5 - if layout is right
5630         */
5631        if (mddev->level == 0)
5632                return raid45_takeover_raid0(mddev, 4);
5633        if (mddev->level == 5 &&
5634            mddev->layout == ALGORITHM_PARITY_N) {
5635                mddev->new_layout = 0;
5636                mddev->new_level = 4;
5637                return setup_conf(mddev);
5638        }
5639        return ERR_PTR(-EINVAL);
5640}
5641
5642static struct mdk_personality raid5_personality;
5643
5644static void *raid6_takeover(mddev_t *mddev)
5645{
5646        /* Currently can only take over a raid5.  We map the
5647         * personality to an equivalent raid6 personality
5648         * with the Q block at the end.
5649         */
5650        int new_layout;
5651
5652        if (mddev->pers != &raid5_personality)
5653                return ERR_PTR(-EINVAL);
5654        if (mddev->degraded > 1)
5655                return ERR_PTR(-EINVAL);
5656        if (mddev->raid_disks > 253)
5657                return ERR_PTR(-EINVAL);
5658        if (mddev->raid_disks < 3)
5659                return ERR_PTR(-EINVAL);
5660
5661        switch (mddev->layout) {
5662        case ALGORITHM_LEFT_ASYMMETRIC:
5663                new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
5664                break;
5665        case ALGORITHM_RIGHT_ASYMMETRIC:
5666                new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
5667                break;
5668        case ALGORITHM_LEFT_SYMMETRIC:
5669                new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
5670                break;
5671        case ALGORITHM_RIGHT_SYMMETRIC:
5672                new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
5673                break;
5674        case ALGORITHM_PARITY_0:
5675                new_layout = ALGORITHM_PARITY_0_6;
5676                break;
5677        case ALGORITHM_PARITY_N:
5678                new_layout = ALGORITHM_PARITY_N;
5679                break;
5680        default:
5681                return ERR_PTR(-EINVAL);
5682        }
5683        mddev->new_level = 6;
5684        mddev->new_layout = new_layout;
5685        mddev->delta_disks = 1;
5686        mddev->raid_disks += 1;
5687        return setup_conf(mddev);
5688}
5689
5690
5691static struct mdk_personality raid6_personality =
5692{
5693        .name           = "raid6",
5694        .level          = 6,
5695        .owner          = THIS_MODULE,
5696        .make_request   = make_request,
5697        .run            = run,
5698        .stop           = stop,
5699        .status         = status,
5700        .error_handler  = error,
5701        .hot_add_disk   = raid5_add_disk,
5702        .hot_remove_disk= raid5_remove_disk,
5703        .spare_active   = raid5_spare_active,
5704        .sync_request   = sync_request,
5705        .resize         = raid5_resize,
5706        .size           = raid5_size,
5707        .check_reshape  = raid6_check_reshape,
5708        .start_reshape  = raid5_start_reshape,
5709        .finish_reshape = raid5_finish_reshape,
5710        .quiesce        = raid5_quiesce,
5711        .takeover       = raid6_takeover,
5712};
5713static struct mdk_personality raid5_personality =
5714{
5715        .name           = "raid5",
5716        .level          = 5,
5717        .owner          = THIS_MODULE,
5718        .make_request   = make_request,
5719        .run            = run,
5720        .stop           = stop,
5721        .status         = status,
5722        .error_handler  = error,
5723        .hot_add_disk   = raid5_add_disk,
5724        .hot_remove_disk= raid5_remove_disk,
5725        .spare_active   = raid5_spare_active,
5726        .sync_request   = sync_request,
5727        .resize         = raid5_resize,
5728        .size           = raid5_size,
5729        .check_reshape  = raid5_check_reshape,
5730        .start_reshape  = raid5_start_reshape,
5731        .finish_reshape = raid5_finish_reshape,
5732        .quiesce        = raid5_quiesce,
5733        .takeover       = raid5_takeover,
5734};
5735
5736static struct mdk_personality raid4_personality =
5737{
5738        .name           = "raid4",
5739        .level          = 4,
5740        .owner          = THIS_MODULE,
5741        .make_request   = make_request,
5742        .run            = run,
5743        .stop           = stop,
5744        .status         = status,
5745        .error_handler  = error,
5746        .hot_add_disk   = raid5_add_disk,
5747        .hot_remove_disk= raid5_remove_disk,
5748        .spare_active   = raid5_spare_active,
5749        .sync_request   = sync_request,
5750        .resize         = raid5_resize,
5751        .size           = raid5_size,
5752        .check_reshape  = raid5_check_reshape,
5753        .start_reshape  = raid5_start_reshape,
5754        .finish_reshape = raid5_finish_reshape,
5755        .quiesce        = raid5_quiesce,
5756        .takeover       = raid4_takeover,
5757};
5758
5759static int __init raid5_init(void)
5760{
5761        register_md_personality(&raid6_personality);
5762        register_md_personality(&raid5_personality);
5763        register_md_personality(&raid4_personality);
5764        return 0;
5765}
5766
5767static void raid5_exit(void)
5768{
5769        unregister_md_personality(&raid6_personality);
5770        unregister_md_personality(&raid5_personality);
5771        unregister_md_personality(&raid4_personality);
5772}
5773
5774module_init(raid5_init);
5775module_exit(raid5_exit);
5776MODULE_LICENSE("GPL");
5777MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
5778MODULE_ALIAS("md-personality-4"); /* RAID5 */
5779MODULE_ALIAS("md-raid5");
5780MODULE_ALIAS("md-raid4");
5781MODULE_ALIAS("md-level-5");
5782MODULE_ALIAS("md-level-4");
5783MODULE_ALIAS("md-personality-8"); /* RAID6 */
5784MODULE_ALIAS("md-raid6");
5785MODULE_ALIAS("md-level-6");
5786
5787/* This used to be two separate modules, they were: */
5788MODULE_ALIAS("raid5");
5789MODULE_ALIAS("raid6");
5790