linux/drivers/md/raid10.c
   1/*
   2 * raid10.c : Multiple Devices driver for Linux
   3 *
   4 * Copyright (C) 2000-2004 Neil Brown
   5 *
   6 * RAID-10 support for md.
   7 *
   8 * Based on code in raid1.c.  See raid1.c for further copyright information.
   9 *
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * (for example /usr/src/linux/COPYING); if not, write to the Free
  18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20
  21#include <linux/slab.h>
  22#include <linux/delay.h>
  23#include <linux/blkdev.h>
  24#include <linux/module.h>
  25#include <linux/seq_file.h>
  26#include <linux/ratelimit.h>
  27#include "md.h"
  28#include "raid10.h"
  29#include "raid0.h"
  30#include "bitmap.h"
  31
  32/*
  33 * RAID10 provides a combination of RAID0 and RAID1 functionality.
  34 * The layout of data is defined by
  35 *    chunk_size
  36 *    raid_disks
  37 *    near_copies (stored in low byte of layout)
  38 *    far_copies (stored in second byte of layout)
  39 *    far_offset (stored in bit 16 of layout )
  40 *
  41 * The data to be stored is divided into chunks using chunksize.
  42 * Each device is divided into far_copies sections.
  43 * In each section, chunks are laid out in a style similar to raid0, but
   44 * near_copies copies of each chunk are stored (each on a different drive).
  45 * The starting device for each section is offset near_copies from the starting
  46 * device of the previous section.
   47 * Thus there are (near_copies*far_copies) copies of each chunk, and each is on
   48 * a different drive.
  49 * near_copies and far_copies must be at least one, and their product is at most
  50 * raid_disks.
  51 *
  52 * If far_offset is true, then the far_copies are handled a bit differently.
   53 * The copies are still in different stripes, but instead of being very far
   54 * apart on disk, they are in adjacent stripes.
  55 */
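/*
 * Two illustrative geometries (example values: chunks labelled A, B, C, ...
 * in virtual order, 4 drives):
 *
 * near_copies=2, far_copies=1 ("n2", a stripe of mirrored pairs):
 *
 *     dev0  dev1  dev2  dev3
 *      A     A     B     B
 *      C     C     D     D
 *
 * near_copies=1, far_copies=2 ("f2"); each drive holds far_copies sections
 * and the second section repeats the data shifted by one device:
 *
 *     dev0  dev1  dev2  dev3
 *      A     B     C     D      <- first section
 *      ...
 *      D     A     B     C      <- second section
 */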
  56
  57/*
  58 * Number of guaranteed r10bios in case of extreme VM load:
  59 */
  60#define NR_RAID10_BIOS 256
  61
   62/* When there are this many requests queued to be written by
  63 * the raid10 thread, we become 'congested' to provide back-pressure
  64 * for writeback.
  65 */
  66static int max_queued_requests = 1024;
  67
  68static void allow_barrier(struct r10conf *conf);
  69static void lower_barrier(struct r10conf *conf);
  70static int enough(struct r10conf *conf, int ignore);
  71
  72static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
  73{
  74        struct r10conf *conf = data;
  75        int size = offsetof(struct r10bio, devs[conf->copies]);
  76
   77        /* allocate an r10bio with room for conf->copies entries in the
   78         * devs array */
  79        return kzalloc(size, gfp_flags);
  80}
  81
  82static void r10bio_pool_free(void *r10_bio, void *data)
  83{
  84        kfree(r10_bio);
  85}
  86
  87/* Maximum size of each resync request */
  88#define RESYNC_BLOCK_SIZE (64*1024)
  89#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
  90/* amount of memory to reserve for resync requests */
  91#define RESYNC_WINDOW (1024*1024)
  92/* maximum number of concurrent requests, memory permitting */
  93#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
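/*
 * For example, with 4KiB pages RESYNC_PAGES is 16, each resync request
 * covers 64KiB, and RESYNC_DEPTH allows 32MiB/64KiB = 512 such requests
 * to be outstanding at once.
 */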
  94
  95/*
  96 * When performing a resync, we need to read and compare, so
   97 * we need as many pages as there are copies.
  98 * When performing a recovery, we need 2 bios, one for read,
  99 * one for write (we recover only one drive per r10buf)
 100 *
 101 */
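/*
 * For example, with copies == 2 a resync r10buf carries two bios, each
 * with its own RESYNC_PAGES pages so the copies can be compared, while a
 * recovery r10buf carries two bios that share a single set of pages (see
 * the get_page() below): the data is read once and written out unchanged.
 */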
 102static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 103{
 104        struct r10conf *conf = data;
 105        struct page *page;
 106        struct r10bio *r10_bio;
 107        struct bio *bio;
 108        int i, j;
 109        int nalloc;
 110
 111        r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 112        if (!r10_bio)
 113                return NULL;
 114
 115        if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
 116                nalloc = conf->copies; /* resync */
 117        else
 118                nalloc = 2; /* recovery */
 119
 120        /*
 121         * Allocate bios.
 122         */
 123        for (j = nalloc ; j-- ; ) {
 124                bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 125                if (!bio)
 126                        goto out_free_bio;
 127                r10_bio->devs[j].bio = bio;
 128                if (!conf->have_replacement)
 129                        continue;
 130                bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 131                if (!bio)
 132                        goto out_free_bio;
 133                r10_bio->devs[j].repl_bio = bio;
 134        }
 135        /*
 136         * Allocate RESYNC_PAGES data pages and attach them
 137         * where needed.
 138         */
 139        for (j = 0 ; j < nalloc; j++) {
 140                struct bio *rbio = r10_bio->devs[j].repl_bio;
 141                bio = r10_bio->devs[j].bio;
 142                for (i = 0; i < RESYNC_PAGES; i++) {
 143                        if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
 144                                                &conf->mddev->recovery)) {
 145                                /* we can share bv_page's during recovery */
 146                                struct bio *rbio = r10_bio->devs[0].bio;
 147                                page = rbio->bi_io_vec[i].bv_page;
 148                                get_page(page);
 149                        } else
 150                                page = alloc_page(gfp_flags);
 151                        if (unlikely(!page))
 152                                goto out_free_pages;
 153
 154                        bio->bi_io_vec[i].bv_page = page;
 155                        if (rbio)
 156                                rbio->bi_io_vec[i].bv_page = page;
 157                }
 158        }
 159
 160        return r10_bio;
 161
 162out_free_pages:
 163        for ( ; i > 0 ; i--)
 164                safe_put_page(bio->bi_io_vec[i-1].bv_page);
 165        while (j--)
 166                for (i = 0; i < RESYNC_PAGES ; i++)
 167                        safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
 168        j = -1;
 169out_free_bio:
 170        while (++j < nalloc) {
 171                bio_put(r10_bio->devs[j].bio);
 172                if (r10_bio->devs[j].repl_bio)
 173                        bio_put(r10_bio->devs[j].repl_bio);
 174        }
 175        r10bio_pool_free(r10_bio, conf);
 176        return NULL;
 177}
 178
 179static void r10buf_pool_free(void *__r10_bio, void *data)
 180{
 181        int i;
 182        struct r10conf *conf = data;
 183        struct r10bio *r10bio = __r10_bio;
 184        int j;
 185
 186        for (j=0; j < conf->copies; j++) {
 187                struct bio *bio = r10bio->devs[j].bio;
 188                if (bio) {
 189                        for (i = 0; i < RESYNC_PAGES; i++) {
 190                                safe_put_page(bio->bi_io_vec[i].bv_page);
 191                                bio->bi_io_vec[i].bv_page = NULL;
 192                        }
 193                        bio_put(bio);
 194                }
 195                bio = r10bio->devs[j].repl_bio;
 196                if (bio)
 197                        bio_put(bio);
 198        }
 199        r10bio_pool_free(r10bio, conf);
 200}
 201
 202static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
 203{
 204        int i;
 205
 206        for (i = 0; i < conf->copies; i++) {
 207                struct bio **bio = & r10_bio->devs[i].bio;
 208                if (!BIO_SPECIAL(*bio))
 209                        bio_put(*bio);
 210                *bio = NULL;
 211                bio = &r10_bio->devs[i].repl_bio;
 212                if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
 213                        bio_put(*bio);
 214                *bio = NULL;
 215        }
 216}
 217
 218static void free_r10bio(struct r10bio *r10_bio)
 219{
 220        struct r10conf *conf = r10_bio->mddev->private;
 221
 222        put_all_bios(conf, r10_bio);
 223        mempool_free(r10_bio, conf->r10bio_pool);
 224}
 225
 226static void put_buf(struct r10bio *r10_bio)
 227{
 228        struct r10conf *conf = r10_bio->mddev->private;
 229
 230        mempool_free(r10_bio, conf->r10buf_pool);
 231
 232        lower_barrier(conf);
 233}
 234
 235static void reschedule_retry(struct r10bio *r10_bio)
 236{
 237        unsigned long flags;
 238        struct mddev *mddev = r10_bio->mddev;
 239        struct r10conf *conf = mddev->private;
 240
 241        spin_lock_irqsave(&conf->device_lock, flags);
 242        list_add(&r10_bio->retry_list, &conf->retry_list);
 243        conf->nr_queued ++;
 244        spin_unlock_irqrestore(&conf->device_lock, flags);
 245
 246        /* wake up frozen array... */
 247        wake_up(&conf->wait_barrier);
 248
 249        md_wakeup_thread(mddev->thread);
 250}
 251
 252/*
 253 * raid_end_bio_io() is called when we have finished servicing a mirrored
 254 * operation and are ready to return a success/failure code to the buffer
 255 * cache layer.
 256 */
 257static void raid_end_bio_io(struct r10bio *r10_bio)
 258{
 259        struct bio *bio = r10_bio->master_bio;
 260        int done;
 261        struct r10conf *conf = r10_bio->mddev->private;
 262
 263        if (bio->bi_phys_segments) {
 264                unsigned long flags;
 265                spin_lock_irqsave(&conf->device_lock, flags);
 266                bio->bi_phys_segments--;
 267                done = (bio->bi_phys_segments == 0);
 268                spin_unlock_irqrestore(&conf->device_lock, flags);
 269        } else
 270                done = 1;
 271        if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
 272                clear_bit(BIO_UPTODATE, &bio->bi_flags);
 273        if (done) {
 274                bio_endio(bio, 0);
 275                /*
 276                 * Wake up any possible resync thread that waits for the device
 277                 * to go idle.
 278                 */
 279                allow_barrier(conf);
 280        }
 281        free_r10bio(r10_bio);
 282}
 283
 284/*
 285 * Update disk head position estimator based on IRQ completion info.
 286 */
 287static inline void update_head_pos(int slot, struct r10bio *r10_bio)
 288{
 289        struct r10conf *conf = r10_bio->mddev->private;
 290
 291        conf->mirrors[r10_bio->devs[slot].devnum].head_position =
 292                r10_bio->devs[slot].addr + (r10_bio->sectors);
 293}
 294
 295/*
  296 * Find the disk number which triggered the given bio
 297 */
 298static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 299                         struct bio *bio, int *slotp, int *replp)
 300{
 301        int slot;
 302        int repl = 0;
 303
 304        for (slot = 0; slot < conf->copies; slot++) {
 305                if (r10_bio->devs[slot].bio == bio)
 306                        break;
 307                if (r10_bio->devs[slot].repl_bio == bio) {
 308                        repl = 1;
 309                        break;
 310                }
 311        }
 312
 313        BUG_ON(slot == conf->copies);
 314        update_head_pos(slot, r10_bio);
 315
 316        if (slotp)
 317                *slotp = slot;
 318        if (replp)
 319                *replp = repl;
 320        return r10_bio->devs[slot].devnum;
 321}
 322
 323static void raid10_end_read_request(struct bio *bio, int error)
 324{
 325        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 326        struct r10bio *r10_bio = bio->bi_private;
 327        int slot, dev;
 328        struct md_rdev *rdev;
 329        struct r10conf *conf = r10_bio->mddev->private;
 330
 331
 332        slot = r10_bio->read_slot;
 333        dev = r10_bio->devs[slot].devnum;
 334        rdev = r10_bio->devs[slot].rdev;
 335        /*
 336         * this branch is our 'one mirror IO has finished' event handler:
 337         */
 338        update_head_pos(slot, r10_bio);
 339
 340        if (uptodate) {
 341                /*
 342                 * Set R10BIO_Uptodate in our master bio, so that
 343                 * we will return a good error code to the higher
 344                 * levels even if IO on some other mirrored buffer fails.
 345                 *
 346                 * The 'master' represents the composite IO operation to
 347                 * user-side. So if something waits for IO, then it will
 348                 * wait for the 'master' bio.
 349                 */
 350                set_bit(R10BIO_Uptodate, &r10_bio->state);
 351        } else {
 352                /* If all other devices that store this block have
 353                 * failed, we want to return the error upwards rather
 354                 * than fail the last device.  Here we redefine
 355                 * "uptodate" to mean "Don't want to retry"
 356                 */
 357                unsigned long flags;
 358                spin_lock_irqsave(&conf->device_lock, flags);
 359                if (!enough(conf, rdev->raid_disk))
 360                        uptodate = 1;
 361                spin_unlock_irqrestore(&conf->device_lock, flags);
 362        }
 363        if (uptodate) {
 364                raid_end_bio_io(r10_bio);
 365                rdev_dec_pending(rdev, conf->mddev);
 366        } else {
 367                /*
 368                 * oops, read error - keep the refcount on the rdev
 369                 */
 370                char b[BDEVNAME_SIZE];
 371                printk_ratelimited(KERN_ERR
 372                                   "md/raid10:%s: %s: rescheduling sector %llu\n",
 373                                   mdname(conf->mddev),
 374                                   bdevname(rdev->bdev, b),
 375                                   (unsigned long long)r10_bio->sector);
 376                set_bit(R10BIO_ReadError, &r10_bio->state);
 377                reschedule_retry(r10_bio);
 378        }
 379}
 380
 381static void close_write(struct r10bio *r10_bio)
 382{
 383        /* clear the bitmap if all writes complete successfully */
 384        bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
 385                        r10_bio->sectors,
 386                        !test_bit(R10BIO_Degraded, &r10_bio->state),
 387                        0);
 388        md_write_end(r10_bio->mddev);
 389}
 390
 391static void one_write_done(struct r10bio *r10_bio)
 392{
 393        if (atomic_dec_and_test(&r10_bio->remaining)) {
 394                if (test_bit(R10BIO_WriteError, &r10_bio->state))
 395                        reschedule_retry(r10_bio);
 396                else {
 397                        close_write(r10_bio);
 398                        if (test_bit(R10BIO_MadeGood, &r10_bio->state))
 399                                reschedule_retry(r10_bio);
 400                        else
 401                                raid_end_bio_io(r10_bio);
 402                }
 403        }
 404}
 405
 406static void raid10_end_write_request(struct bio *bio, int error)
 407{
 408        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 409        struct r10bio *r10_bio = bio->bi_private;
 410        int dev;
 411        int dec_rdev = 1;
 412        struct r10conf *conf = r10_bio->mddev->private;
 413        int slot, repl;
 414        struct md_rdev *rdev = NULL;
 415
 416        dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 417
 418        if (repl)
 419                rdev = conf->mirrors[dev].replacement;
 420        if (!rdev) {
 421                smp_rmb();
 422                repl = 0;
 423                rdev = conf->mirrors[dev].rdev;
 424        }
 425        /*
 426         * this branch is our 'one mirror IO has finished' event handler:
 427         */
 428        if (!uptodate) {
 429                if (repl)
 430                        /* Never record new bad blocks to replacement,
 431                         * just fail it.
 432                         */
 433                        md_error(rdev->mddev, rdev);
 434                else {
 435                        set_bit(WriteErrorSeen, &rdev->flags);
 436                        if (!test_and_set_bit(WantReplacement, &rdev->flags))
 437                                set_bit(MD_RECOVERY_NEEDED,
 438                                        &rdev->mddev->recovery);
 439                        set_bit(R10BIO_WriteError, &r10_bio->state);
 440                        dec_rdev = 0;
 441                }
 442        } else {
 443                /*
 444                 * Set R10BIO_Uptodate in our master bio, so that
  445         * we will return a good error code to the higher
 446                 * levels even if IO on some other mirrored buffer fails.
 447                 *
 448                 * The 'master' represents the composite IO operation to
 449                 * user-side. So if something waits for IO, then it will
 450                 * wait for the 'master' bio.
 451                 */
 452                sector_t first_bad;
 453                int bad_sectors;
 454
 455                set_bit(R10BIO_Uptodate, &r10_bio->state);
 456
 457                /* Maybe we can clear some bad blocks. */
 458                if (is_badblock(rdev,
 459                                r10_bio->devs[slot].addr,
 460                                r10_bio->sectors,
 461                                &first_bad, &bad_sectors)) {
 462                        bio_put(bio);
 463                        if (repl)
 464                                r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
 465                        else
 466                                r10_bio->devs[slot].bio = IO_MADE_GOOD;
 467                        dec_rdev = 0;
 468                        set_bit(R10BIO_MadeGood, &r10_bio->state);
 469                }
 470        }
 471
 472        /*
 473         *
 474         * Let's see if all mirrored write operations have finished
 475         * already.
 476         */
 477        one_write_done(r10_bio);
 478        if (dec_rdev)
  479                rdev_dec_pending(rdev, conf->mddev);
 480}
 481
 482/*
 483 * RAID10 layout manager
 484 * As well as the chunksize and raid_disks count, there are two
 485 * parameters: near_copies and far_copies.
 486 * near_copies * far_copies must be <= raid_disks.
 487 * Normally one of these will be 1.
 488 * If both are 1, we get raid0.
 489 * If near_copies == raid_disks, we get raid1.
 490 *
 491 * Chunks are laid out in raid0 style with near_copies copies of the
 492 * first chunk, followed by near_copies copies of the next chunk and
 493 * so on.
 494 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 495 * as described above, we start again with a device offset of near_copies.
 496 * So we effectively have another copy of the whole array further down all
 497 * the drives, but with blocks on different drives.
  498 * With this layout, a block is never stored twice on the same device.
 499 *
 500 * raid10_find_phys finds the sector offset of a given virtual sector
 501 * on each device that it is on.
 502 *
 503 * raid10_find_virt does the reverse mapping, from a device and a
 504 * sector offset to a virtual address
 505 */
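/*
 * Worked example (64KiB chunks = 128 sectors, 4 drives, near_copies=2,
 * far_copies=1): virtual sector 300 is offset 44 into virtual chunk 2.
 * chunk * near_copies = 4, so dev = 4 % 4 = 0 and stripe = 4 / 4 = 1;
 * the two copies live on dev0 and dev1 at device sector 1*128 + 44 = 172,
 * and raid10_find_virt(conf, 172, 0) maps back to virtual sector 300.
 */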
 506
 507static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
 508{
 509        int n,f;
 510        sector_t sector;
 511        sector_t chunk;
 512        sector_t stripe;
 513        int dev;
 514
 515        int slot = 0;
 516
 517        /* now calculate first sector/dev */
 518        chunk = r10bio->sector >> conf->chunk_shift;
 519        sector = r10bio->sector & conf->chunk_mask;
 520
 521        chunk *= conf->near_copies;
 522        stripe = chunk;
 523        dev = sector_div(stripe, conf->raid_disks);
 524        if (conf->far_offset)
 525                stripe *= conf->far_copies;
 526
 527        sector += stripe << conf->chunk_shift;
 528
 529        /* and calculate all the others */
 530        for (n=0; n < conf->near_copies; n++) {
 531                int d = dev;
 532                sector_t s = sector;
 533                r10bio->devs[slot].addr = sector;
 534                r10bio->devs[slot].devnum = d;
 535                slot++;
 536
 537                for (f = 1; f < conf->far_copies; f++) {
 538                        d += conf->near_copies;
 539                        if (d >= conf->raid_disks)
 540                                d -= conf->raid_disks;
 541                        s += conf->stride;
 542                        r10bio->devs[slot].devnum = d;
 543                        r10bio->devs[slot].addr = s;
 544                        slot++;
 545                }
 546                dev++;
 547                if (dev >= conf->raid_disks) {
 548                        dev = 0;
 549                        sector += (conf->chunk_mask + 1);
 550                }
 551        }
 552        BUG_ON(slot != conf->copies);
 553}
 554
 555static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
 556{
 557        sector_t offset, chunk, vchunk;
 558
 559        offset = sector & conf->chunk_mask;
 560        if (conf->far_offset) {
 561                int fc;
 562                chunk = sector >> conf->chunk_shift;
 563                fc = sector_div(chunk, conf->far_copies);
 564                dev -= fc * conf->near_copies;
 565                if (dev < 0)
 566                        dev += conf->raid_disks;
 567        } else {
 568                while (sector >= conf->stride) {
 569                        sector -= conf->stride;
 570                        if (dev < conf->near_copies)
 571                                dev += conf->raid_disks - conf->near_copies;
 572                        else
 573                                dev -= conf->near_copies;
 574                }
 575                chunk = sector >> conf->chunk_shift;
 576        }
 577        vchunk = chunk * conf->raid_disks + dev;
 578        sector_div(vchunk, conf->near_copies);
 579        return (vchunk << conf->chunk_shift) + offset;
 580}
 581
 582/**
  583 *      raid10_mergeable_bvec -- tell the bio layer whether two requests can be merged
 584 *      @q: request queue
 585 *      @bvm: properties of new bio
 586 *      @biovec: the request that could be merged to it.
 587 *
 588 *      Return amount of bytes we can accept at this offset
  589 *      If near_copies == raid_disks, there are no striping issues,
 590 *      but in that case, the function isn't called at all.
 591 */
 592static int raid10_mergeable_bvec(struct request_queue *q,
 593                                 struct bvec_merge_data *bvm,
 594                                 struct bio_vec *biovec)
 595{
 596        struct mddev *mddev = q->queuedata;
 597        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 598        int max;
 599        unsigned int chunk_sectors = mddev->chunk_sectors;
 600        unsigned int bio_sectors = bvm->bi_size >> 9;
 601
 602        max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
 603        if (max < 0) max = 0; /* bio_add cannot handle a negative return */
 604        if (max <= biovec->bv_len && bio_sectors == 0)
 605                return biovec->bv_len;
 606        else
 607                return max;
 608}
 609
 610/*
 611 * This routine returns the disk from which the requested read should
 612 * be done. There is a per-array 'next expected sequential IO' sector
 613 * number - if this matches on the next IO then we use the last disk.
  614 * There is also a per-disk 'last known head position' sector that is
  615 * maintained from IRQ contexts; both the normal and the resync IO
 616 * completion handlers update this position correctly. If there is no
 617 * perfect sequential match then we pick the disk whose head is closest.
 618 *
 619 * If there are 2 mirrors in the same 2 devices, performance degrades
  620 * because head position is tracked per mirror, not per device.
 621 *
 622 * The rdev for the device selected will have nr_pending incremented.
 623 */
 624
 625/*
 626 * FIXME: possibly should rethink readbalancing and do it differently
 627 * depending on near_copies / far_copies geometry.
 628 */
 629static struct md_rdev *read_balance(struct r10conf *conf,
 630                                    struct r10bio *r10_bio,
 631                                    int *max_sectors)
 632{
 633        const sector_t this_sector = r10_bio->sector;
 634        int disk, slot;
 635        int sectors = r10_bio->sectors;
 636        int best_good_sectors;
 637        sector_t new_distance, best_dist;
 638        struct md_rdev *rdev, *best_rdev;
 639        int do_balance;
 640        int best_slot;
 641
 642        raid10_find_phys(conf, r10_bio);
 643        rcu_read_lock();
 644retry:
 645        sectors = r10_bio->sectors;
 646        best_slot = -1;
 647        best_rdev = NULL;
 648        best_dist = MaxSector;
 649        best_good_sectors = 0;
 650        do_balance = 1;
 651        /*
 652         * Check if we can balance. We can balance on the whole
 653         * device if no resync is going on (recovery is ok), or below
 654         * the resync window. We take the first readable disk when
 655         * above the resync window.
 656         */
 657        if (conf->mddev->recovery_cp < MaxSector
 658            && (this_sector + sectors >= conf->next_resync))
 659                do_balance = 0;
 660
 661        for (slot = 0; slot < conf->copies ; slot++) {
 662                sector_t first_bad;
 663                int bad_sectors;
 664                sector_t dev_sector;
 665
 666                if (r10_bio->devs[slot].bio == IO_BLOCKED)
 667                        continue;
 668                disk = r10_bio->devs[slot].devnum;
 669                rdev = rcu_dereference(conf->mirrors[disk].replacement);
 670                if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
 671                    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 672                        rdev = rcu_dereference(conf->mirrors[disk].rdev);
 673                if (rdev == NULL)
 674                        continue;
 675                if (test_bit(Faulty, &rdev->flags))
 676                        continue;
 677                if (!test_bit(In_sync, &rdev->flags) &&
 678                    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 679                        continue;
 680
 681                dev_sector = r10_bio->devs[slot].addr;
 682                if (is_badblock(rdev, dev_sector, sectors,
 683                                &first_bad, &bad_sectors)) {
 684                        if (best_dist < MaxSector)
 685                                /* Already have a better slot */
 686                                continue;
 687                        if (first_bad <= dev_sector) {
 688                                /* Cannot read here.  If this is the
 689                                 * 'primary' device, then we must not read
 690                                 * beyond 'bad_sectors' from another device.
 691                                 */
 692                                bad_sectors -= (dev_sector - first_bad);
 693                                if (!do_balance && sectors > bad_sectors)
 694                                        sectors = bad_sectors;
 695                                if (best_good_sectors > sectors)
 696                                        best_good_sectors = sectors;
 697                        } else {
 698                                sector_t good_sectors =
 699                                        first_bad - dev_sector;
 700                                if (good_sectors > best_good_sectors) {
 701                                        best_good_sectors = good_sectors;
 702                                        best_slot = slot;
 703                                        best_rdev = rdev;
 704                                }
 705                                if (!do_balance)
 706                                        /* Must read from here */
 707                                        break;
 708                        }
 709                        continue;
 710                } else
 711                        best_good_sectors = sectors;
 712
 713                if (!do_balance)
 714                        break;
 715
 716                /* This optimisation is debatable, and completely destroys
 717                 * sequential read speed for 'far copies' arrays.  So only
 718                 * keep it for 'near' arrays, and review those later.
 719                 */
 720                if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
 721                        break;
 722
 723                /* for far > 1 always use the lowest address */
 724                if (conf->far_copies > 1)
 725                        new_distance = r10_bio->devs[slot].addr;
 726                else
 727                        new_distance = abs(r10_bio->devs[slot].addr -
 728                                           conf->mirrors[disk].head_position);
 729                if (new_distance < best_dist) {
 730                        best_dist = new_distance;
 731                        best_slot = slot;
 732                        best_rdev = rdev;
 733                }
 734        }
 735        if (slot >= conf->copies) {
 736                slot = best_slot;
 737                rdev = best_rdev;
 738        }
 739
 740        if (slot >= 0) {
 741                atomic_inc(&rdev->nr_pending);
 742                if (test_bit(Faulty, &rdev->flags)) {
 743                        /* Cannot risk returning a device that failed
 744                         * before we inc'ed nr_pending
 745                         */
 746                        rdev_dec_pending(rdev, conf->mddev);
 747                        goto retry;
 748                }
 749                r10_bio->read_slot = slot;
 750        } else
 751                rdev = NULL;
 752        rcu_read_unlock();
 753        *max_sectors = best_good_sectors;
 754
 755        return rdev;
 756}
 757
 758static int raid10_congested(void *data, int bits)
 759{
 760        struct mddev *mddev = data;
 761        struct r10conf *conf = mddev->private;
 762        int i, ret = 0;
 763
 764        if ((bits & (1 << BDI_async_congested)) &&
 765            conf->pending_count >= max_queued_requests)
 766                return 1;
 767
 768        if (mddev_congested(mddev, bits))
 769                return 1;
 770        rcu_read_lock();
 771        for (i = 0; i < conf->raid_disks && ret == 0; i++) {
 772                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 773                if (rdev && !test_bit(Faulty, &rdev->flags)) {
 774                        struct request_queue *q = bdev_get_queue(rdev->bdev);
 775
 776                        ret |= bdi_congested(&q->backing_dev_info, bits);
 777                }
 778        }
 779        rcu_read_unlock();
 780        return ret;
 781}
 782
 783static void flush_pending_writes(struct r10conf *conf)
 784{
 785        /* Any writes that have been queued but are awaiting
 786         * bitmap updates get flushed here.
 787         */
 788        spin_lock_irq(&conf->device_lock);
 789
 790        if (conf->pending_bio_list.head) {
 791                struct bio *bio;
 792                bio = bio_list_get(&conf->pending_bio_list);
 793                conf->pending_count = 0;
 794                spin_unlock_irq(&conf->device_lock);
 795                /* flush any pending bitmap writes to disk
 796                 * before proceeding w/ I/O */
 797                bitmap_unplug(conf->mddev->bitmap);
 798                wake_up(&conf->wait_barrier);
 799
 800                while (bio) { /* submit pending writes */
 801                        struct bio *next = bio->bi_next;
 802                        bio->bi_next = NULL;
 803                        generic_make_request(bio);
 804                        bio = next;
 805                }
 806        } else
 807                spin_unlock_irq(&conf->device_lock);
 808}
 809
 810/* Barriers....
 811 * Sometimes we need to suspend IO while we do something else,
 812 * either some resync/recovery, or reconfigure the array.
 813 * To do this we raise a 'barrier'.
 814 * The 'barrier' is a counter that can be raised multiple times
 815 * to count how many activities are happening which preclude
 816 * normal IO.
 817 * We can only raise the barrier if there is no pending IO.
 818 * i.e. if nr_pending == 0.
 819 * We choose only to raise the barrier if no-one is waiting for the
 820 * barrier to go down.  This means that as soon as an IO request
 821 * is ready, no other operations which require a barrier will start
 822 * until the IO request has had a chance.
 823 *
 824 * So: regular IO calls 'wait_barrier'.  When that returns there
  825 *    is no background IO happening.  It must arrange to call
 826 *    allow_barrier when it has finished its IO.
  827 * background IO calls must call raise_barrier.  Once that returns
  828 *    there is no normal IO happening.  It must arrange to call
 829 *    lower_barrier when the particular background IO completes.
 830 */
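/*
 * In this driver the pairs are: wait_barrier()/allow_barrier() around each
 * regular request (see make_request and raid_end_bio_io), and
 * raise_barrier()/lower_barrier() around each resync/recovery request
 * (lower_barrier is called from put_buf when a resync buffer is released).
 */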
 831
 832static void raise_barrier(struct r10conf *conf, int force)
 833{
 834        BUG_ON(force && !conf->barrier);
 835        spin_lock_irq(&conf->resync_lock);
 836
 837        /* Wait until no block IO is waiting (unless 'force') */
 838        wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
 839                            conf->resync_lock, );
 840
 841        /* block any new IO from starting */
 842        conf->barrier++;
 843
 844        /* Now wait for all pending IO to complete */
 845        wait_event_lock_irq(conf->wait_barrier,
 846                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
 847                            conf->resync_lock, );
 848
 849        spin_unlock_irq(&conf->resync_lock);
 850}
 851
 852static void lower_barrier(struct r10conf *conf)
 853{
 854        unsigned long flags;
 855        spin_lock_irqsave(&conf->resync_lock, flags);
 856        conf->barrier--;
 857        spin_unlock_irqrestore(&conf->resync_lock, flags);
 858        wake_up(&conf->wait_barrier);
 859}
 860
 861static void wait_barrier(struct r10conf *conf)
 862{
 863        spin_lock_irq(&conf->resync_lock);
 864        if (conf->barrier) {
 865                conf->nr_waiting++;
 866                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 867                                    conf->resync_lock,
 868                                    );
 869                conf->nr_waiting--;
 870        }
 871        conf->nr_pending++;
 872        spin_unlock_irq(&conf->resync_lock);
 873}
 874
 875static void allow_barrier(struct r10conf *conf)
 876{
 877        unsigned long flags;
 878        spin_lock_irqsave(&conf->resync_lock, flags);
 879        conf->nr_pending--;
 880        spin_unlock_irqrestore(&conf->resync_lock, flags);
 881        wake_up(&conf->wait_barrier);
 882}
 883
 884static void freeze_array(struct r10conf *conf)
 885{
  886        /* stop sync IO and normal IO and wait for everything to
 887         * go quiet.
 888         * We increment barrier and nr_waiting, and then
  889         * wait until nr_pending matches nr_queued+1
 890         * This is called in the context of one normal IO request
 891         * that has failed. Thus any sync request that might be pending
 892         * will be blocked by nr_pending, and we need to wait for
 893         * pending IO requests to complete or be queued for re-try.
 894         * Thus the number queued (nr_queued) plus this request (1)
 895         * must match the number of pending IOs (nr_pending) before
 896         * we continue.
 897         */
 898        spin_lock_irq(&conf->resync_lock);
 899        conf->barrier++;
 900        conf->nr_waiting++;
 901        wait_event_lock_irq(conf->wait_barrier,
 902                            conf->nr_pending == conf->nr_queued+1,
 903                            conf->resync_lock,
 904                            flush_pending_writes(conf));
 905
 906        spin_unlock_irq(&conf->resync_lock);
 907}
 908
 909static void unfreeze_array(struct r10conf *conf)
 910{
 911        /* reverse the effect of the freeze */
 912        spin_lock_irq(&conf->resync_lock);
 913        conf->barrier--;
 914        conf->nr_waiting--;
 915        wake_up(&conf->wait_barrier);
 916        spin_unlock_irq(&conf->resync_lock);
 917}
 918
 919static void make_request(struct mddev *mddev, struct bio * bio)
 920{
 921        struct r10conf *conf = mddev->private;
 922        struct r10bio *r10_bio;
 923        struct bio *read_bio;
 924        int i;
 925        int chunk_sects = conf->chunk_mask + 1;
 926        const int rw = bio_data_dir(bio);
 927        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 928        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
 929        unsigned long flags;
 930        struct md_rdev *blocked_rdev;
 931        int plugged;
 932        int sectors_handled;
 933        int max_sectors;
 934
 935        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 936                md_flush_request(mddev, bio);
 937                return;
 938        }
 939
 940        /* If this request crosses a chunk boundary, we need to
 941         * split it.  This will only happen for 1 PAGE (or less) requests.
 942         */
 943        if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
 944                      > chunk_sects &&
 945                    conf->near_copies < conf->raid_disks)) {
 946                struct bio_pair *bp;
 947                /* Sanity check -- queue functions should prevent this happening */
 948                if (bio->bi_vcnt != 1 ||
 949                    bio->bi_idx != 0)
 950                        goto bad_map;
 951                /* This is a one page bio that upper layers
 952                 * refuse to split for us, so we need to split it.
 953                 */
 954                bp = bio_split(bio,
 955                               chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
 956
 957                /* Each of these 'make_request' calls will call 'wait_barrier'.
 958                 * If the first succeeds but the second blocks due to the resync
 959                 * thread raising the barrier, we will deadlock because the
 960                 * IO to the underlying device will be queued in generic_make_request
 961                 * and will never complete, so will never reduce nr_pending.
 962                 * So increment nr_waiting here so no new raise_barriers will
 963                 * succeed, and so the second wait_barrier cannot block.
 964                 */
 965                spin_lock_irq(&conf->resync_lock);
 966                conf->nr_waiting++;
 967                spin_unlock_irq(&conf->resync_lock);
 968
 969                make_request(mddev, &bp->bio1);
 970                make_request(mddev, &bp->bio2);
 971
 972                spin_lock_irq(&conf->resync_lock);
 973                conf->nr_waiting--;
 974                wake_up(&conf->wait_barrier);
 975                spin_unlock_irq(&conf->resync_lock);
 976
 977                bio_pair_release(bp);
 978                return;
 979        bad_map:
 980                printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 981                       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
 982                       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 983
 984                bio_io_error(bio);
 985                return;
 986        }
 987
 988        md_write_start(mddev, bio);
 989
 990        /*
 991         * Register the new request and wait if the reconstruction
 992         * thread has put up a bar for new requests.
 993         * Continue immediately if no resync is active currently.
 994         */
 995        wait_barrier(conf);
 996
 997        r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 998
 999        r10_bio->master_bio = bio;
1000        r10_bio->sectors = bio->bi_size >> 9;
1001
1002        r10_bio->mddev = mddev;
1003        r10_bio->sector = bio->bi_sector;
1004        r10_bio->state = 0;
1005
1006        /* We might need to issue multiple reads to different
1007         * devices if there are bad blocks around, so we keep
1008         * track of the number of reads in bio->bi_phys_segments.
1009         * If this is 0, there is only one r10_bio and no locking
1010         * will be needed when the request completes.  If it is
1011         * non-zero, then it is the number of not-completed requests.
1012         */
1013        bio->bi_phys_segments = 0;
1014        clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1015
1016        if (rw == READ) {
1017                /*
1018                 * read balancing logic:
1019                 */
1020                struct md_rdev *rdev;
1021                int slot;
1022
1023read_again:
1024                rdev = read_balance(conf, r10_bio, &max_sectors);
1025                if (!rdev) {
1026                        raid_end_bio_io(r10_bio);
1027                        return;
1028                }
1029                slot = r10_bio->read_slot;
1030
1031                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1032                md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
1033                            max_sectors);
1034
1035                r10_bio->devs[slot].bio = read_bio;
1036                r10_bio->devs[slot].rdev = rdev;
1037
1038                read_bio->bi_sector = r10_bio->devs[slot].addr +
1039                        rdev->data_offset;
1040                read_bio->bi_bdev = rdev->bdev;
1041                read_bio->bi_end_io = raid10_end_read_request;
1042                read_bio->bi_rw = READ | do_sync;
1043                read_bio->bi_private = r10_bio;
1044
1045                if (max_sectors < r10_bio->sectors) {
1046                        /* Could not read all from this device, so we will
1047                         * need another r10_bio.
1048                         */
 1049                        sectors_handled = (r10_bio->sector + max_sectors
1050                                           - bio->bi_sector);
1051                        r10_bio->sectors = max_sectors;
1052                        spin_lock_irq(&conf->device_lock);
1053                        if (bio->bi_phys_segments == 0)
1054                                bio->bi_phys_segments = 2;
1055                        else
1056                                bio->bi_phys_segments++;
 1057                        spin_unlock_irq(&conf->device_lock);
1058                        /* Cannot call generic_make_request directly
1059                         * as that will be queued in __generic_make_request
1060                         * and subsequent mempool_alloc might block
 1061                         * waiting for it, so hand the bio over to raid10d.
1062                         */
1063                        reschedule_retry(r10_bio);
1064
1065                        r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1066
1067                        r10_bio->master_bio = bio;
1068                        r10_bio->sectors = ((bio->bi_size >> 9)
1069                                            - sectors_handled);
1070                        r10_bio->state = 0;
1071                        r10_bio->mddev = mddev;
1072                        r10_bio->sector = bio->bi_sector + sectors_handled;
1073                        goto read_again;
1074                } else
1075                        generic_make_request(read_bio);
1076                return;
1077        }
1078
1079        /*
1080         * WRITE:
1081         */
1082        if (conf->pending_count >= max_queued_requests) {
1083                md_wakeup_thread(mddev->thread);
1084                wait_event(conf->wait_barrier,
1085                           conf->pending_count < max_queued_requests);
1086        }
1087        /* first select target devices under rcu_lock and
1088         * inc refcount on their rdev.  Record them by setting
1089         * bios[x] to bio
1090         * If there are known/acknowledged bad blocks on any device
1091         * on which we have seen a write error, we want to avoid
1092         * writing to those blocks.  This potentially requires several
1093         * writes to write around the bad blocks.  Each set of writes
1094         * gets its own r10_bio with a set of bios attached.  The number
 1095         * of r10_bios is recorded in bio->bi_phys_segments just as with
1096         * the read case.
1097         */
1098        plugged = mddev_check_plugged(mddev);
1099
1100        r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1101        raid10_find_phys(conf, r10_bio);
1102retry_write:
1103        blocked_rdev = NULL;
1104        rcu_read_lock();
1105        max_sectors = r10_bio->sectors;
1106
1107        for (i = 0;  i < conf->copies; i++) {
1108                int d = r10_bio->devs[i].devnum;
1109                struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1110                struct md_rdev *rrdev = rcu_dereference(
1111                        conf->mirrors[d].replacement);
1112                if (rdev == rrdev)
1113                        rrdev = NULL;
1114                if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1115                        atomic_inc(&rdev->nr_pending);
1116                        blocked_rdev = rdev;
1117                        break;
1118                }
1119                if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1120                        atomic_inc(&rrdev->nr_pending);
1121                        blocked_rdev = rrdev;
1122                        break;
1123                }
1124                if (rrdev && test_bit(Faulty, &rrdev->flags))
1125                        rrdev = NULL;
1126
1127                r10_bio->devs[i].bio = NULL;
1128                r10_bio->devs[i].repl_bio = NULL;
1129                if (!rdev || test_bit(Faulty, &rdev->flags)) {
1130                        set_bit(R10BIO_Degraded, &r10_bio->state);
1131                        continue;
1132                }
1133                if (test_bit(WriteErrorSeen, &rdev->flags)) {
1134                        sector_t first_bad;
1135                        sector_t dev_sector = r10_bio->devs[i].addr;
1136                        int bad_sectors;
1137                        int is_bad;
1138
1139                        is_bad = is_badblock(rdev, dev_sector,
1140                                             max_sectors,
1141                                             &first_bad, &bad_sectors);
1142                        if (is_bad < 0) {
1143                                /* Mustn't write here until the bad block
1144                                 * is acknowledged
1145                                 */
1146                                atomic_inc(&rdev->nr_pending);
1147                                set_bit(BlockedBadBlocks, &rdev->flags);
1148                                blocked_rdev = rdev;
1149                                break;
1150                        }
1151                        if (is_bad && first_bad <= dev_sector) {
1152                                /* Cannot write here at all */
1153                                bad_sectors -= (dev_sector - first_bad);
1154                                if (bad_sectors < max_sectors)
1155                                        /* Mustn't write more than bad_sectors
1156                                         * to other devices yet
1157                                         */
1158                                        max_sectors = bad_sectors;
1159                                /* We don't set R10BIO_Degraded as that
1160                                 * only applies if the disk is missing,
1161                                 * so it might be re-added, and we want to
1162                                 * know to recover this chunk.
1163                                 * In this case the device is here, and the
1164                                 * fact that this chunk is not in-sync is
1165                                 * recorded in the bad block log.
1166                                 */
1167                                continue;
1168                        }
1169                        if (is_bad) {
1170                                int good_sectors = first_bad - dev_sector;
1171                                if (good_sectors < max_sectors)
1172                                        max_sectors = good_sectors;
1173                        }
1174                }
1175                r10_bio->devs[i].bio = bio;
1176                atomic_inc(&rdev->nr_pending);
1177                if (rrdev) {
1178                        r10_bio->devs[i].repl_bio = bio;
1179                        atomic_inc(&rrdev->nr_pending);
1180                }
1181        }
1182        rcu_read_unlock();
1183
1184        if (unlikely(blocked_rdev)) {
1185                /* Have to wait for this device to get unblocked, then retry */
1186                int j;
1187                int d;
1188
1189                for (j = 0; j < i; j++) {
1190                        if (r10_bio->devs[j].bio) {
1191                                d = r10_bio->devs[j].devnum;
1192                                rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1193                        }
1194                        if (r10_bio->devs[j].repl_bio) {
1195                                struct md_rdev *rdev;
1196                                d = r10_bio->devs[j].devnum;
1197                                rdev = conf->mirrors[d].replacement;
1198                                if (!rdev) {
1199                                        /* Race with remove_disk */
1200                                        smp_mb();
1201                                        rdev = conf->mirrors[d].rdev;
1202                                }
1203                                rdev_dec_pending(rdev, mddev);
1204                        }
1205                }
1206                allow_barrier(conf);
1207                md_wait_for_blocked_rdev(blocked_rdev, mddev);
1208                wait_barrier(conf);
1209                goto retry_write;
1210        }
1211
1212        if (max_sectors < r10_bio->sectors) {
1213                /* We are splitting this into multiple parts, so
1214                 * we need to prepare for allocating another r10_bio.
1215                 */
1216                r10_bio->sectors = max_sectors;
1217                spin_lock_irq(&conf->device_lock);
1218                if (bio->bi_phys_segments == 0)
1219                        bio->bi_phys_segments = 2;
1220                else
1221                        bio->bi_phys_segments++;
1222                spin_unlock_irq(&conf->device_lock);
1223        }
1224        sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
1225
1226        atomic_set(&r10_bio->remaining, 1);
1227        bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1228
1229        for (i = 0; i < conf->copies; i++) {
1230                struct bio *mbio;
1231                int d = r10_bio->devs[i].devnum;
1232                if (!r10_bio->devs[i].bio)
1233                        continue;
1234
1235                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1236                md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1237                            max_sectors);
1238                r10_bio->devs[i].bio = mbio;
1239
1240                mbio->bi_sector = (r10_bio->devs[i].addr+
1241                                   conf->mirrors[d].rdev->data_offset);
1242                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1243                mbio->bi_end_io = raid10_end_write_request;
1244                mbio->bi_rw = WRITE | do_sync | do_fua;
1245                mbio->bi_private = r10_bio;
1246
1247                atomic_inc(&r10_bio->remaining);
1248                spin_lock_irqsave(&conf->device_lock, flags);
1249                bio_list_add(&conf->pending_bio_list, mbio);
1250                conf->pending_count++;
1251                spin_unlock_irqrestore(&conf->device_lock, flags);
1252
1253                if (!r10_bio->devs[i].repl_bio)
1254                        continue;
1255
1256                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1257                md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1258                            max_sectors);
1259                r10_bio->devs[i].repl_bio = mbio;
1260
1261                /* We are actively writing to the original device
1262                 * so it cannot disappear, so the replacement cannot
1263                 * become NULL here
1264                 */
1265                mbio->bi_sector = (r10_bio->devs[i].addr+
1266                                   conf->mirrors[d].replacement->data_offset);
1267                mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
1268                mbio->bi_end_io = raid10_end_write_request;
1269                mbio->bi_rw = WRITE | do_sync | do_fua;
1270                mbio->bi_private = r10_bio;
1271
1272                atomic_inc(&r10_bio->remaining);
1273                spin_lock_irqsave(&conf->device_lock, flags);
1274                bio_list_add(&conf->pending_bio_list, mbio);
1275                conf->pending_count++;
1276                spin_unlock_irqrestore(&conf->device_lock, flags);
1277        }
1278
1279        /* Don't remove the bias on 'remaining' (one_write_done) until
1280         * after checking if we need to go around again.
1281         */
1282
1283        if (sectors_handled < (bio->bi_size >> 9)) {
1284                one_write_done(r10_bio);
1285                /* We need another r10_bio.  It has already been counted
1286                 * in bio->bi_phys_segments.
1287                 */
1288                r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1289
1290                r10_bio->master_bio = bio;
1291                r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1292
1293                r10_bio->mddev = mddev;
1294                r10_bio->sector = bio->bi_sector + sectors_handled;
1295                r10_bio->state = 0;
1296                goto retry_write;
1297        }
1298        one_write_done(r10_bio);
1299
1300        /* In case raid10d snuck in to freeze_array */
1301        wake_up(&conf->wait_barrier);
1302
1303        if (do_sync || !mddev->bitmap || !plugged)
1304                md_wakeup_thread(mddev->thread);
1305}
1306
1307static void status(struct seq_file *seq, struct mddev *mddev)
1308{
1309        struct r10conf *conf = mddev->private;
1310        int i;
1311
1312        if (conf->near_copies < conf->raid_disks)
1313                seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1314        if (conf->near_copies > 1)
1315                seq_printf(seq, " %d near-copies", conf->near_copies);
1316        if (conf->far_copies > 1) {
1317                if (conf->far_offset)
1318                        seq_printf(seq, " %d offset-copies", conf->far_copies);
1319                else
1320                        seq_printf(seq, " %d far-copies", conf->far_copies);
1321        }
1322        seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1323                                        conf->raid_disks - mddev->degraded);
1324        for (i = 0; i < conf->raid_disks; i++)
1325                seq_printf(seq, "%s",
1326                              conf->mirrors[i].rdev &&
1327                              test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
1328        seq_printf(seq, "]");
1329}
1330
1331/* check if there are enough drives for
 1332 * every block to appear on at least one.
1333 * Don't consider the device numbered 'ignore'
1334 * as we might be about to remove it.
1335 */
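/*
 * For example, with 4 drives and copies == 2 this walks the pairs {0,1}
 * and {2,3}; in the 'near' layout each pair holds the only copies of its
 * chunks, so if a pair has no working member other than 'ignore', some
 * blocks would be left with no copy and 0 is returned.
 */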
1336static int enough(struct r10conf *conf, int ignore)
1337{
1338        int first = 0;
1339
1340        do {
1341                int n = conf->copies;
1342                int cnt = 0;
1343                while (n--) {
1344                        if (conf->mirrors[first].rdev &&
1345                            first != ignore)
1346                                cnt++;
1347                        first = (first+1) % conf->raid_disks;
1348                }
1349                if (cnt == 0)
1350                        return 0;
1351        } while (first != 0);
1352        return 1;
1353}
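/*
 * Worked example (editorial note, not in the original source): with
 * raid_disks == 4 and copies == 2, enough() walks the slot windows
 * {0,1} and {2,3}.  If drive 1 is already missing and we ask whether
 * drive 0 could be ignored (ignore == 0), the first window counts no
 * working drive and the function returns 0 -- removing drive 0 would
 * leave some blocks with no copy at all.  With drive 1 present the
 * same call returns 1.
 */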
1354
1355static void error(struct mddev *mddev, struct md_rdev *rdev)
1356{
1357        char b[BDEVNAME_SIZE];
1358        struct r10conf *conf = mddev->private;
1359
1360        /*
1361         * If it is not operational, then we have already marked it as dead
1362         * else if it is the last working disk, ignore the error and let the
1363         * next level up know,
1364         * else mark the drive as failed.
1365         */
1366        if (test_bit(In_sync, &rdev->flags)
1367            && !enough(conf, rdev->raid_disk))
1368                /*
1369                 * Don't fail the drive, just return an IO error.
1370                 */
1371                return;
1372        if (test_and_clear_bit(In_sync, &rdev->flags)) {
1373                unsigned long flags;
1374                spin_lock_irqsave(&conf->device_lock, flags);
1375                mddev->degraded++;
1376                spin_unlock_irqrestore(&conf->device_lock, flags);
1377                /*
1378                 * if recovery is running, make sure it aborts.
1379                 */
1380                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1381        }
1382        set_bit(Blocked, &rdev->flags);
1383        set_bit(Faulty, &rdev->flags);
1384        set_bit(MD_CHANGE_DEVS, &mddev->flags);
1385        printk(KERN_ALERT
1386               "md/raid10:%s: Disk failure on %s, disabling device.\n"
1387               "md/raid10:%s: Operation continuing on %d devices.\n",
1388               mdname(mddev), bdevname(rdev->bdev, b),
1389               mdname(mddev), conf->raid_disks - mddev->degraded);
1390}
1391
1392static void print_conf(struct r10conf *conf)
1393{
1394        int i;
1395        struct mirror_info *tmp;
1396
1397        printk(KERN_DEBUG "RAID10 conf printout:\n");
1398        if (!conf) {
1399                printk(KERN_DEBUG "(!conf)\n");
1400                return;
1401        }
1402        printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1403                conf->raid_disks);
1404
1405        for (i = 0; i < conf->raid_disks; i++) {
1406                char b[BDEVNAME_SIZE];
1407                tmp = conf->mirrors + i;
1408                if (tmp->rdev)
1409                        printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1410                                i, !test_bit(In_sync, &tmp->rdev->flags),
1411                                !test_bit(Faulty, &tmp->rdev->flags),
1412                                bdevname(tmp->rdev->bdev,b));
1413        }
1414}
1415
1416static void close_sync(struct r10conf *conf)
1417{
1418        wait_barrier(conf);
1419        allow_barrier(conf);
1420
1421        mempool_destroy(conf->r10buf_pool);
1422        conf->r10buf_pool = NULL;
1423}
1424
1425static int raid10_spare_active(struct mddev *mddev)
1426{
1427        int i;
1428        struct r10conf *conf = mddev->private;
1429        struct mirror_info *tmp;
1430        int count = 0;
1431        unsigned long flags;
1432
1433        /*
1434         * Find all non-in_sync disks within the RAID10 configuration
1435         * and mark them in_sync
1436         */
1437        for (i = 0; i < conf->raid_disks; i++) {
1438                tmp = conf->mirrors + i;
1439                if (tmp->replacement
1440                    && tmp->replacement->recovery_offset == MaxSector
1441                    && !test_bit(Faulty, &tmp->replacement->flags)
1442                    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1443                        /* Replacement has just become active */
1444                        if (!tmp->rdev
1445                            || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1446                                count++;
1447                        if (tmp->rdev) {
1448                                /* Replaced device not technically faulty,
1449                                 * but we need to be sure it gets removed
1450                                 * and never re-added.
1451                                 */
1452                                set_bit(Faulty, &tmp->rdev->flags);
1453                                sysfs_notify_dirent_safe(
1454                                        tmp->rdev->sysfs_state);
1455                        }
1456                        sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1457                } else if (tmp->rdev
1458                           && !test_bit(Faulty, &tmp->rdev->flags)
1459                           && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1460                        count++;
1461                        sysfs_notify_dirent(tmp->rdev->sysfs_state);
1462                }
1463        }
1464        spin_lock_irqsave(&conf->device_lock, flags);
1465        mddev->degraded -= count;
1466        spin_unlock_irqrestore(&conf->device_lock, flags);
1467
1468        print_conf(conf);
1469        return count;
1470}
1471
1472
1473static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1474{
1475        struct r10conf *conf = mddev->private;
1476        int err = -EEXIST;
1477        int mirror;
1478        int first = 0;
1479        int last = conf->raid_disks - 1;
1480
1481        if (mddev->recovery_cp < MaxSector)
1482                /* only hot-add to in-sync arrays, as recovery is
1483                 * very different from resync
1484                 */
1485                return -EBUSY;
1486        if (!enough(conf, -1))
1487                return -EINVAL;
1488
1489        if (rdev->raid_disk >= 0)
1490                first = last = rdev->raid_disk;
1491
1492        if (rdev->saved_raid_disk >= first &&
1493            conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1494                mirror = rdev->saved_raid_disk;
1495        else
1496                mirror = first;
1497        for ( ; mirror <= last ; mirror++) {
1498                struct mirror_info *p = &conf->mirrors[mirror];
1499                if (p->recovery_disabled == mddev->recovery_disabled)
1500                        continue;
1501                if (p->rdev) {
1502                        if (!test_bit(WantReplacement, &p->rdev->flags) ||
1503                            p->replacement != NULL)
1504                                continue;
1505                        clear_bit(In_sync, &rdev->flags);
1506                        set_bit(Replacement, &rdev->flags);
1507                        rdev->raid_disk = mirror;
1508                        err = 0;
1509                        disk_stack_limits(mddev->gendisk, rdev->bdev,
1510                                          rdev->data_offset << 9);
1511                        if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1512                                blk_queue_max_segments(mddev->queue, 1);
1513                                blk_queue_segment_boundary(mddev->queue,
1514                                                           PAGE_CACHE_SIZE - 1);
1515                        }
1516                        conf->fullsync = 1;
1517                        rcu_assign_pointer(p->replacement, rdev);
1518                        break;
1519                }
1520
1521                disk_stack_limits(mddev->gendisk, rdev->bdev,
1522                                  rdev->data_offset << 9);
1523                /* as we don't honour merge_bvec_fn, we must
1524                 * never risk violating it, so limit
1525                 * ->max_segments to one lying within a single
1526                 * page, as a one-page request is never in
1527                 * violation.
1528                 */
1529                if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1530                        blk_queue_max_segments(mddev->queue, 1);
1531                        blk_queue_segment_boundary(mddev->queue,
1532                                                   PAGE_CACHE_SIZE - 1);
1533                }
1534
1535                p->head_position = 0;
1536                p->recovery_disabled = mddev->recovery_disabled - 1;
1537                rdev->raid_disk = mirror;
1538                err = 0;
1539                if (rdev->saved_raid_disk != mirror)
1540                        conf->fullsync = 1;
1541                rcu_assign_pointer(p->rdev, rdev);
1542                break;
1543        }
1544
1545        md_integrity_add_rdev(rdev, mddev);
1546        print_conf(conf);
1547        return err;
1548}
1549
1550static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1551{
1552        struct r10conf *conf = mddev->private;
1553        int err = 0;
1554        int number = rdev->raid_disk;
1555        struct md_rdev **rdevp;
1556        struct mirror_info *p = conf->mirrors + number;
1557
1558        print_conf(conf);
1559        if (rdev == p->rdev)
1560                rdevp = &p->rdev;
1561        else if (rdev == p->replacement)
1562                rdevp = &p->replacement;
1563        else
1564                return 0;
1565
1566        if (test_bit(In_sync, &rdev->flags) ||
1567            atomic_read(&rdev->nr_pending)) {
1568                err = -EBUSY;
1569                goto abort;
1570        }
1571        /* Only remove faulty devices if recovery
1572         * is not possible.
1573         */
1574        if (!test_bit(Faulty, &rdev->flags) &&
1575            mddev->recovery_disabled != p->recovery_disabled &&
1576            (!p->replacement || p->replacement == rdev) &&
1577            enough(conf, -1)) {
1578                err = -EBUSY;
1579                goto abort;
1580        }
1581        *rdevp = NULL;
1582        synchronize_rcu();
1583        if (atomic_read(&rdev->nr_pending)) {
1584                /* lost the race, try later */
1585                err = -EBUSY;
1586                *rdevp = rdev;
1587                goto abort;
1588        } else if (p->replacement) {
1589                /* We must have just cleared 'rdev' */
1590                p->rdev = p->replacement;
1591                clear_bit(Replacement, &p->replacement->flags);
1592                smp_mb(); /* Make sure other CPUs may see both as identical
1593                           * but will never see both missing -- if they are careful.
1594                           */
1595                p->replacement = NULL;
1596                clear_bit(WantReplacement, &rdev->flags);
1597        } else
1598                /* We might have just removed the Replacement as faulty.
1599                 * Clear the flag just in case.
1600                 */
1601                clear_bit(WantReplacement, &rdev->flags);
1602
1603        err = md_integrity_register(mddev);
1604
1605abort:
1606
1607        print_conf(conf);
1608        return err;
1609}
1610
1611
1612static void end_sync_read(struct bio *bio, int error)
1613{
1614        struct r10bio *r10_bio = bio->bi_private;
1615        struct r10conf *conf = r10_bio->mddev->private;
1616        int d;
1617
1618        d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1619
1620        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1621                set_bit(R10BIO_Uptodate, &r10_bio->state);
1622        else
1623                /* The write handler will notice the lack of
1624                 * R10BIO_Uptodate and record any errors etc
1625                 */
1626                atomic_add(r10_bio->sectors,
1627                           &conf->mirrors[d].rdev->corrected_errors);
1628
1629        /* for reconstruct, we always reschedule after a read.
1630         * for resync, only after all reads
1631         */
1632        rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1633        if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1634            atomic_dec_and_test(&r10_bio->remaining)) {
1635                /* we have read all the blocks,
1636                 * do the comparison in process context in raid10d
1637                 */
1638                reschedule_retry(r10_bio);
1639        }
1640}
1641
1642static void end_sync_request(struct r10bio *r10_bio)
1643{
1644        struct mddev *mddev = r10_bio->mddev;
1645
1646        while (atomic_dec_and_test(&r10_bio->remaining)) {
1647                if (r10_bio->master_bio == NULL) {
1648                        /* the primary of several recovery bios */
1649                        sector_t s = r10_bio->sectors;
1650                        if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1651                            test_bit(R10BIO_WriteError, &r10_bio->state))
1652                                reschedule_retry(r10_bio);
1653                        else
1654                                put_buf(r10_bio);
1655                        md_done_sync(mddev, s, 1);
1656                        break;
1657                } else {
1658                        struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1659                        if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1660                            test_bit(R10BIO_WriteError, &r10_bio->state))
1661                                reschedule_retry(r10_bio);
1662                        else
1663                                put_buf(r10_bio);
1664                        r10_bio = r10_bio2;
1665                }
1666        }
1667}
1668
1669static void end_sync_write(struct bio *bio, int error)
1670{
1671        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1672        struct r10bio *r10_bio = bio->bi_private;
1673        struct mddev *mddev = r10_bio->mddev;
1674        struct r10conf *conf = mddev->private;
1675        int d;
1676        sector_t first_bad;
1677        int bad_sectors;
1678        int slot;
1679        int repl;
1680        struct md_rdev *rdev = NULL;
1681
1682        d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1683        if (repl)
1684                rdev = conf->mirrors[d].replacement;
1685        if (!rdev) {
1686                smp_mb();
1687                rdev = conf->mirrors[d].rdev;
1688        }
1689
1690        if (!uptodate) {
1691                if (repl)
1692                        md_error(mddev, rdev);
1693                else {
1694                        set_bit(WriteErrorSeen, &rdev->flags);
1695                        if (!test_and_set_bit(WantReplacement, &rdev->flags))
1696                                set_bit(MD_RECOVERY_NEEDED,
1697                                        &rdev->mddev->recovery);
1698                        set_bit(R10BIO_WriteError, &r10_bio->state);
1699                }
1700        } else if (is_badblock(rdev,
1701                             r10_bio->devs[slot].addr,
1702                             r10_bio->sectors,
1703                             &first_bad, &bad_sectors))
1704                set_bit(R10BIO_MadeGood, &r10_bio->state);
1705
1706        rdev_dec_pending(rdev, mddev);
1707
1708        end_sync_request(r10_bio);
1709}
1710
1711/*
1712 * Note: sync and recover are handled very differently for raid10.
1713 * This code is for resync.
1714 * For resync, we read through virtual addresses and read all blocks.
1715 * If there is any error, we schedule a write.  The lowest numbered
1716 * drive is authoritative.
1717 * However, requests come in for physical addresses, so we need to map.
1718 * For every physical address there are raid_disks/copies virtual addresses,
1719 * which is always at least one, but is not necessarily an integer.
1720 * This means that a physical address can span multiple chunks, so we may
1721 * have to submit multiple io requests for a single sync request.
1722 */
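/*
 * Worked example (editorial note, not in the original source): with
 * raid_disks == 3 and copies == 2 there are 3/2 == 1.5 virtual addresses
 * per physical address, so the virtual range that has to be resynced for
 * one physical region is half again as long and can cross a chunk
 * boundary -- which is why a single sync request may have to be split
 * into several io requests, as the comment above describes.
 */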
1723/*
1724 * We check if all blocks are in-sync and only write to blocks that
1725 * aren't in sync
1726 */
1727static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1728{
1729        struct r10conf *conf = mddev->private;
1730        int i, first;
1731        struct bio *tbio, *fbio;
1732
1733        atomic_set(&r10_bio->remaining, 1);
1734
1735        /* find the first device with a block */
1736        for (i=0; i<conf->copies; i++)
1737                if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1738                        break;
1739
1740        if (i == conf->copies)
1741                goto done;
1742
1743        first = i;
1744        fbio = r10_bio->devs[i].bio;
1745
1746        /* now find blocks with errors */
1747        for (i=0 ; i < conf->copies ; i++) {
1748                int  j, d;
1749                int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1750
1751                tbio = r10_bio->devs[i].bio;
1752
1753                if (tbio->bi_end_io != end_sync_read)
1754                        continue;
1755                if (i == first)
1756                        continue;
1757                if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1758                        /* We know that the bi_io_vec layout is the same for
1759                         * both 'first' and 'i', so we just compare them.
1760                         * All vec entries are PAGE_SIZE;
1761                         */
1762                        for (j = 0; j < vcnt; j++)
1763                                if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1764                                           page_address(tbio->bi_io_vec[j].bv_page),
1765                                           PAGE_SIZE))
1766                                        break;
1767                        if (j == vcnt)
1768                                continue;
1769                        mddev->resync_mismatches += r10_bio->sectors;
1770                        if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1771                                /* Don't fix anything. */
1772                                continue;
1773                }
1774                /* Ok, we need to write this bio, either to correct an
1775                 * inconsistency or to correct an unreadable block.
1776                 * First we need to fixup bv_offset, bv_len and
1777                 * bi_vecs, as the read request might have corrupted these
1778                 */
1779                tbio->bi_vcnt = vcnt;
1780                tbio->bi_size = r10_bio->sectors << 9;
1781                tbio->bi_idx = 0;
1782                tbio->bi_phys_segments = 0;
1783                tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1784                tbio->bi_flags |= 1 << BIO_UPTODATE;
1785                tbio->bi_next = NULL;
1786                tbio->bi_rw = WRITE;
1787                tbio->bi_private = r10_bio;
1788                tbio->bi_sector = r10_bio->devs[i].addr;
1789
1790                for (j=0; j < vcnt ; j++) {
1791                        tbio->bi_io_vec[j].bv_offset = 0;
1792                        tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1793
1794                        memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1795                               page_address(fbio->bi_io_vec[j].bv_page),
1796                               PAGE_SIZE);
1797                }
1798                tbio->bi_end_io = end_sync_write;
1799
1800                d = r10_bio->devs[i].devnum;
1801                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1802                atomic_inc(&r10_bio->remaining);
1803                md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1804
1805                tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1806                tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1807                generic_make_request(tbio);
1808        }
1809
1810        /* Now write out to any replacement devices
1811         * that are active
1812         */
1813        for (i = 0; i < conf->copies; i++) {
1814                int j, d;
1815                int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1816
1817                tbio = r10_bio->devs[i].repl_bio;
1818                if (!tbio || !tbio->bi_end_io)
1819                        continue;
1820                if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
1821                    && r10_bio->devs[i].bio != fbio)
1822                        for (j = 0; j < vcnt; j++)
1823                                memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1824                                       page_address(fbio->bi_io_vec[j].bv_page),
1825                                       PAGE_SIZE);
1826                d = r10_bio->devs[i].devnum;
1827                atomic_inc(&r10_bio->remaining);
1828                md_sync_acct(conf->mirrors[d].replacement->bdev,
1829                             tbio->bi_size >> 9);
1830                generic_make_request(tbio);
1831        }
1832
1833done:
1834        if (atomic_dec_and_test(&r10_bio->remaining)) {
1835                md_done_sync(mddev, r10_bio->sectors, 1);
1836                put_buf(r10_bio);
1837        }
1838}
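/*
 * Editorial note, not in the original source: the vcnt calculation in
 * sync_request_write() assumes each bio_vec entry holds one full page.
 * For example, a 64K resync block on a 4K-page system gives
 * r10_bio->sectors == 128 and vcnt == 128 >> (PAGE_SHIFT - 9) == 16
 * pages to compare or copy per copy of the data.
 */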
1839
1840/*
1841 * Now for the recovery code.
1842 * Recovery happens across physical sectors.
1843 * We recover all non-in_sync drives by finding the virtual address of
1844 * each, and then choose a working drive that also has that virt address.
1845 * There is a separate r10_bio for each non-in_sync drive.
1846 * Only the first two slots are in use: the first for reading,
1847 * the second for writing.
1848 *
1849 */
1850static void fix_recovery_read_error(struct r10bio *r10_bio)
1851{
1852        /* We got a read error during recovery.
1853         * We repeat the read in smaller page-sized sections.
1854         * If a read succeeds, write it to the new device or record
1855         * a bad block if we cannot.
1856         * If a read fails, record a bad block on both old and
1857         * new devices.
1858         */
1859        struct mddev *mddev = r10_bio->mddev;
1860        struct r10conf *conf = mddev->private;
1861        struct bio *bio = r10_bio->devs[0].bio;
1862        sector_t sect = 0;
1863        int sectors = r10_bio->sectors;
1864        int idx = 0;
1865        int dr = r10_bio->devs[0].devnum;
1866        int dw = r10_bio->devs[1].devnum;
1867
1868        while (sectors) {
1869                int s = sectors;
1870                struct md_rdev *rdev;
1871                sector_t addr;
1872                int ok;
1873
1874                if (s > (PAGE_SIZE>>9))
1875                        s = PAGE_SIZE >> 9;
1876
1877                rdev = conf->mirrors[dr].rdev;
1878                addr = r10_bio->devs[0].addr + sect;
1879                ok = sync_page_io(rdev,
1880                                  addr,
1881                                  s << 9,
1882                                  bio->bi_io_vec[idx].bv_page,
1883                                  READ, false);
1884                if (ok) {
1885                        rdev = conf->mirrors[dw].rdev;
1886                        addr = r10_bio->devs[1].addr + sect;
1887                        ok = sync_page_io(rdev,
1888                                          addr,
1889                                          s << 9,
1890                                          bio->bi_io_vec[idx].bv_page,
1891                                          WRITE, false);
1892                        if (!ok) {
1893                                set_bit(WriteErrorSeen, &rdev->flags);
1894                                if (!test_and_set_bit(WantReplacement,
1895                                                      &rdev->flags))
1896                                        set_bit(MD_RECOVERY_NEEDED,
1897                                                &rdev->mddev->recovery);
1898                        }
1899                }
1900                if (!ok) {
1901                        /* We don't worry if we cannot set a bad block -
1902                         * it really is bad so there is no loss in not
1903                         * recording it yet
1904                         */
1905                        rdev_set_badblocks(rdev, addr, s, 0);
1906
1907                        if (rdev != conf->mirrors[dw].rdev) {
1908                                /* need bad block on destination too */
1909                                struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
1910                                addr = r10_bio->devs[1].addr + sect;
1911                                ok = rdev_set_badblocks(rdev2, addr, s, 0);
1912                                if (!ok) {
1913                                        /* just abort the recovery */
1914                                        printk(KERN_NOTICE
1915                                               "md/raid10:%s: recovery aborted"
1916                                               " due to read error\n",
1917                                               mdname(mddev));
1918
1919                                        conf->mirrors[dw].recovery_disabled
1920                                                = mddev->recovery_disabled;
1921                                        set_bit(MD_RECOVERY_INTR,
1922                                                &mddev->recovery);
1923                                        break;
1924                                }
1925                        }
1926                }
1927
1928                sectors -= s;
1929                sect += s;
1930                idx++;
1931        }
1932}
1933
1934static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1935{
1936        struct r10conf *conf = mddev->private;
1937        int d;
1938        struct bio *wbio, *wbio2;
1939
1940        if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
1941                fix_recovery_read_error(r10_bio);
1942                end_sync_request(r10_bio);
1943                return;
1944        }
1945
1946        /*
1947         * share the pages with the first bio
1948         * and submit the write request
1949         */
1950        d = r10_bio->devs[1].devnum;
1951        wbio = r10_bio->devs[1].bio;
1952        wbio2 = r10_bio->devs[1].repl_bio;
1953        if (wbio->bi_end_io) {
1954                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1955                md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1956                generic_make_request(wbio);
1957        }
1958        if (wbio2 && wbio2->bi_end_io) {
1959                atomic_inc(&conf->mirrors[d].replacement->nr_pending);
1960                md_sync_acct(conf->mirrors[d].replacement->bdev,
1961                             wbio2->bi_size >> 9);
1962                generic_make_request(wbio2);
1963        }
1964}
1965
1966
1967/*
1968 * Used by fix_read_error() to decay the per rdev read_errors.
1969 * We halve the read error count for every hour that has elapsed
1970 * since the last recorded read error.
1971 *
1972 */
1973static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
1974{
1975        struct timespec cur_time_mon;
1976        unsigned long hours_since_last;
1977        unsigned int read_errors = atomic_read(&rdev->read_errors);
1978
1979        ktime_get_ts(&cur_time_mon);
1980
1981        if (rdev->last_read_error.tv_sec == 0 &&
1982            rdev->last_read_error.tv_nsec == 0) {
1983                /* first time we've seen a read error */
1984                rdev->last_read_error = cur_time_mon;
1985                return;
1986        }
1987
1988        hours_since_last = (cur_time_mon.tv_sec -
1989                            rdev->last_read_error.tv_sec) / 3600;
1990
1991        rdev->last_read_error = cur_time_mon;
1992
1993        /*
1994         * if hours_since_last is > the number of bits in read_errors
1995         * just set read errors to 0. We do this to avoid
1996         * overflowing the shift of read_errors by hours_since_last.
1997         */
1998        if (hours_since_last >= 8 * sizeof(read_errors))
1999                atomic_set(&rdev->read_errors, 0);
2000        else
2001                atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2002}
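/*
 * Worked example (editorial note, not in the original source): if an
 * rdev has accumulated 40 read errors and the previous error was seen
 * 3 hours ago, the count decays to 40 >> 3 == 5 before the caller adds
 * the new error.  Once 32 or more hours have passed (8 bits per byte of
 * the unsigned int counter) the count is simply reset to 0 rather than
 * performing an oversized shift.
 */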
2003
2004static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2005                            int sectors, struct page *page, int rw)
2006{
2007        sector_t first_bad;
2008        int bad_sectors;
2009
2010        if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2011            && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2012                return -1;
2013        if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2014                /* success */
2015                return 1;
2016        if (rw == WRITE) {
2017                set_bit(WriteErrorSeen, &rdev->flags);
2018                if (!test_and_set_bit(WantReplacement, &rdev->flags))
2019                        set_bit(MD_RECOVERY_NEEDED,
2020                                &rdev->mddev->recovery);
2021        }
2022        /* need to record an error - either for the block or the device */
2023        if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2024                md_error(rdev->mddev, rdev);
2025        return 0;
2026}
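/*
 * Editorial note, not in the original source: callers such as
 * fix_read_error() interpret the return value of r10_sync_page_io() as
 * 1 == the I/O succeeded, 0 == the I/O failed (a bad block was recorded
 * or the device was failed), and -1 == the region is already known bad
 * so the I/O was not attempted at all.
 */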
2027
2028/*
2029 * This is a kernel thread which:
2030 *
2031 *      1.      Retries failed read operations on working mirrors.
2032 *      2.      Updates the raid superblock when problems are encountered.
2033 *      3.      Performs writes following reads for array synchronising.
2034 */
2035
2036static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2037{
2038        int sect = 0; /* Offset from r10_bio->sector */
2039        int sectors = r10_bio->sectors;
2040        struct md_rdev*rdev;
2041        int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2042        int d = r10_bio->devs[r10_bio->read_slot].devnum;
2043
2044        /* still own a reference to this rdev, so it cannot
2045         * have been cleared recently.
2046         */
2047        rdev = conf->mirrors[d].rdev;
2048
2049        if (test_bit(Faulty, &rdev->flags))
2050                /* drive has already been failed, just ignore any
2051                   more fix_read_error() attempts */
2052                return;
2053
2054        check_decay_read_errors(mddev, rdev);
2055        atomic_inc(&rdev->read_errors);
2056        if (atomic_read(&rdev->read_errors) > max_read_errors) {
2057                char b[BDEVNAME_SIZE];
2058                bdevname(rdev->bdev, b);
2059
2060                printk(KERN_NOTICE
2061                       "md/raid10:%s: %s: Raid device exceeded "
2062                       "read_error threshold [cur %d:max %d]\n",
2063                       mdname(mddev), b,
2064                       atomic_read(&rdev->read_errors), max_read_errors);
2065                printk(KERN_NOTICE
2066                       "md/raid10:%s: %s: Failing raid device\n",
2067                       mdname(mddev), b);
2068                md_error(mddev, conf->mirrors[d].rdev);
2069                r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2070                return;
2071        }
2072
2073        while(sectors) {
2074                int s = sectors;
2075                int sl = r10_bio->read_slot;
2076                int success = 0;
2077                int start;
2078
2079                if (s > (PAGE_SIZE>>9))
2080                        s = PAGE_SIZE >> 9;
2081
2082                rcu_read_lock();
2083                do {
2084                        sector_t first_bad;
2085                        int bad_sectors;
2086
2087                        d = r10_bio->devs[sl].devnum;
2088                        rdev = rcu_dereference(conf->mirrors[d].rdev);
2089                        if (rdev &&
2090                            test_bit(In_sync, &rdev->flags) &&
2091                            is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2092                                        &first_bad, &bad_sectors) == 0) {
2093                                atomic_inc(&rdev->nr_pending);
2094                                rcu_read_unlock();
2095                                success = sync_page_io(rdev,
2096                                                       r10_bio->devs[sl].addr +
2097                                                       sect,
2098                                                       s<<9,
2099                                                       conf->tmppage, READ, false);
2100                                rdev_dec_pending(rdev, mddev);
2101                                rcu_read_lock();
2102                                if (success)
2103                                        break;
2104                        }
2105                        sl++;
2106                        if (sl == conf->copies)
2107                                sl = 0;
2108                } while (!success && sl != r10_bio->read_slot);
2109                rcu_read_unlock();
2110
2111                if (!success) {
2112                        /* Cannot read from anywhere, just mark the block
2113                         * as bad on the first device to discourage future
2114                         * reads.
2115                         */
2116                        int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2117                        rdev = conf->mirrors[dn].rdev;
2118
2119                        if (!rdev_set_badblocks(
2120                                    rdev,
2121                                    r10_bio->devs[r10_bio->read_slot].addr
2122                                    + sect,
2123                                    s, 0)) {
2124                                md_error(mddev, rdev);
2125                                r10_bio->devs[r10_bio->read_slot].bio
2126                                        = IO_BLOCKED;
2127                        }
2128                        break;
2129                }
2130
2131                start = sl;
2132                /* write it back and re-read */
2133                rcu_read_lock();
2134                while (sl != r10_bio->read_slot) {
2135                        char b[BDEVNAME_SIZE];
2136
2137                        if (sl==0)
2138                                sl = conf->copies;
2139                        sl--;
2140                        d = r10_bio->devs[sl].devnum;
2141                        rdev = rcu_dereference(conf->mirrors[d].rdev);
2142                        if (!rdev ||
2143                            !test_bit(In_sync, &rdev->flags))
2144                                continue;
2145
2146                        atomic_inc(&rdev->nr_pending);
2147                        rcu_read_unlock();
2148                        if (r10_sync_page_io(rdev,
2149                                             r10_bio->devs[sl].addr +
2150                                             sect,
2151                                             s<<9, conf->tmppage, WRITE)
2152                            == 0) {
2153                                /* Well, this device is dead */
2154                                printk(KERN_NOTICE
2155                                       "md/raid10:%s: read correction "
2156                                       "write failed"
2157                                       " (%d sectors at %llu on %s)\n",
2158                                       mdname(mddev), s,
2159                                       (unsigned long long)(
2160                                               sect + rdev->data_offset),
2161                                       bdevname(rdev->bdev, b));
2162                                printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2163                                       "drive\n",
2164                                       mdname(mddev),
2165                                       bdevname(rdev->bdev, b));
2166                        }
2167                        rdev_dec_pending(rdev, mddev);
2168                        rcu_read_lock();
2169                }
2170                sl = start;
2171                while (sl != r10_bio->read_slot) {
2172                        char b[BDEVNAME_SIZE];
2173
2174                        if (sl==0)
2175                                sl = conf->copies;
2176                        sl--;
2177                        d = r10_bio->devs[sl].devnum;
2178                        rdev = rcu_dereference(conf->mirrors[d].rdev);
2179                        if (!rdev ||
2180                            !test_bit(In_sync, &rdev->flags))
2181                                continue;
2182
2183                        atomic_inc(&rdev->nr_pending);
2184                        rcu_read_unlock();
2185                        switch (r10_sync_page_io(rdev,
2186                                             r10_bio->devs[sl].addr +
2187                                             sect,
2188                                             s<<9, conf->tmppage,
2189                                                 READ)) {
2190                        case 0:
2191                                /* Well, this device is dead */
2192                                printk(KERN_NOTICE
2193                                       "md/raid10:%s: unable to read back "
2194                                       "corrected sectors"
2195                                       " (%d sectors at %llu on %s)\n",
2196                                       mdname(mddev), s,
2197                                       (unsigned long long)(
2198                                               sect + rdev->data_offset),
2199                                       bdevname(rdev->bdev, b));
2200                                printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2201                                       "drive\n",
2202                                       mdname(mddev),
2203                                       bdevname(rdev->bdev, b));
2204                                break;
2205                        case 1:
2206                                printk(KERN_INFO
2207                                       "md/raid10:%s: read error corrected"
2208                                       " (%d sectors at %llu on %s)\n",
2209                                       mdname(mddev), s,
2210                                       (unsigned long long)(
2211                                               sect + rdev->data_offset),
2212                                       bdevname(rdev->bdev, b));
2213                                atomic_add(s, &rdev->corrected_errors);
2214                        }
2215
2216                        rdev_dec_pending(rdev, mddev);
2217                        rcu_read_lock();
2218                }
2219                rcu_read_unlock();
2220
2221                sectors -= s;
2222                sect += s;
2223        }
2224}
2225
2226static void bi_complete(struct bio *bio, int error)
2227{
2228        complete((struct completion *)bio->bi_private);
2229}
2230
2231static int submit_bio_wait(int rw, struct bio *bio)
2232{
2233        struct completion event;
2234        rw |= REQ_SYNC;
2235
2236        init_completion(&event);
2237        bio->bi_private = &event;
2238        bio->bi_end_io = bi_complete;
2239        submit_bio(rw, bio);
2240        wait_for_completion(&event);
2241
2242        return test_bit(BIO_UPTODATE, &bio->bi_flags);
2243}
2244
2245static int narrow_write_error(struct r10bio *r10_bio, int i)
2246{
2247        struct bio *bio = r10_bio->master_bio;
2248        struct mddev *mddev = r10_bio->mddev;
2249        struct r10conf *conf = mddev->private;
2250        struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2251        /* bio has the data to be written to slot 'i' where
2252         * we just recently had a write error.
2253         * We repeatedly clone the bio and trim down to one block,
2254         * then try the write.  Where the write fails we record
2255         * a bad block.
2256         * It is conceivable that the bio doesn't exactly align with
2257         * blocks.  We must handle this.
2258         *
2259         * We currently own a reference to the rdev.
2260         */
2261
2262        int block_sectors;
2263        sector_t sector;
2264        int sectors;
2265        int sect_to_write = r10_bio->sectors;
2266        int ok = 1;
2267
2268        if (rdev->badblocks.shift < 0)
2269                return 0;
2270
2271        block_sectors = 1 << rdev->badblocks.shift;
2272        sector = r10_bio->sector;
2273        sectors = ((r10_bio->sector + block_sectors)
2274                   & ~(sector_t)(block_sectors - 1))
2275                - sector;
2276
2277        while (sect_to_write) {
2278                struct bio *wbio;
2279                if (sectors > sect_to_write)
2280                        sectors = sect_to_write;
2281                /* Write at 'sector' for 'sectors' */
2282                wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2283                md_trim_bio(wbio, sector - bio->bi_sector, sectors);
2284                wbio->bi_sector = (r10_bio->devs[i].addr+
2285                                   rdev->data_offset+
2286                                   (sector - r10_bio->sector));
2287                wbio->bi_bdev = rdev->bdev;
2288                if (submit_bio_wait(WRITE, wbio) == 0)
2289                        /* Failure! */
2290                        ok = rdev_set_badblocks(rdev, sector,
2291                                                sectors, 0)
2292                                && ok;
2293
2294                bio_put(wbio);
2295                sect_to_write -= sectors;
2296                sector += sectors;
2297                sectors = block_sectors;
2298        }
2299        return ok;
2300}
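/*
 * Worked example (editorial note, not in the original source): with
 * rdev->badblocks.shift == 3 the bad-block granularity is
 * block_sectors == 1 << 3 == 8 sectors.  For a failed write starting at
 * sector 21, the first cloned bio covers
 *
 *      ((21 + 8) & ~7) - 21 == 24 - 21 == 3 sectors,
 *
 * which brings the position up to the next 8-sector boundary; each
 * following clone covers a full 8 sectors (the last possibly fewer), so
 * any bad block recorded on failure stays aligned to that granularity.
 */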
2301
2302static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2303{
2304        int slot = r10_bio->read_slot;
2305        struct bio *bio;
2306        struct r10conf *conf = mddev->private;
2307        struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2308        char b[BDEVNAME_SIZE];
2309        unsigned long do_sync;
2310        int max_sectors;
2311
2312        /* we got a read error. Maybe the drive is bad.  Maybe just
2313         * the block and we can fix it.
2314         * We freeze all other IO, and try reading the block from
2315         * other devices.  When we find one, we re-write
2316         * and check whether that fixes the read error.
2317         * This is all done synchronously while the array is
2318         * frozen.
2319         */
2320        bio = r10_bio->devs[slot].bio;
2321        bdevname(bio->bi_bdev, b);
2322        bio_put(bio);
2323        r10_bio->devs[slot].bio = NULL;
2324
2325        if (mddev->ro == 0) {
2326                freeze_array(conf);
2327                fix_read_error(conf, mddev, r10_bio);
2328                unfreeze_array(conf);
2329        } else
2330                r10_bio->devs[slot].bio = IO_BLOCKED;
2331
2332        rdev_dec_pending(rdev, mddev);
2333
2334read_more:
2335        rdev = read_balance(conf, r10_bio, &max_sectors);
2336        if (rdev == NULL) {
2337                printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2338                       " read error for block %llu\n",
2339                       mdname(mddev), b,
2340                       (unsigned long long)r10_bio->sector);
2341                raid_end_bio_io(r10_bio);
2342                return;
2343        }
2344
2345        do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2346        slot = r10_bio->read_slot;
2347        printk_ratelimited(
2348                KERN_ERR
2349                "md/raid10:%s: %s: redirecting "
2350                "sector %llu to another mirror\n",
2351                mdname(mddev),
2352                bdevname(rdev->bdev, b),
2353                (unsigned long long)r10_bio->sector);
2354        bio = bio_clone_mddev(r10_bio->master_bio,
2355                              GFP_NOIO, mddev);
2356        md_trim_bio(bio,
2357                    r10_bio->sector - bio->bi_sector,
2358                    max_sectors);
2359        r10_bio->devs[slot].bio = bio;
2360        r10_bio->devs[slot].rdev = rdev;
2361        bio->bi_sector = r10_bio->devs[slot].addr
2362                + rdev->data_offset;
2363        bio->bi_bdev = rdev->bdev;
2364        bio->bi_rw = READ | do_sync;
2365        bio->bi_private = r10_bio;
2366        bio->bi_end_io = raid10_end_read_request;
2367        if (max_sectors < r10_bio->sectors) {
2368                /* Drat - have to split this up more */
2369                struct bio *mbio = r10_bio->master_bio;
2370                int sectors_handled =
2371                        r10_bio->sector + max_sectors
2372                        - mbio->bi_sector;
2373                r10_bio->sectors = max_sectors;
2374                spin_lock_irq(&conf->device_lock);
2375                if (mbio->bi_phys_segments == 0)
2376                        mbio->bi_phys_segments = 2;
2377                else
2378                        mbio->bi_phys_segments++;
2379                spin_unlock_irq(&conf->device_lock);
2380                generic_make_request(bio);
2381
2382                r10_bio = mempool_alloc(conf->r10bio_pool,
2383                                        GFP_NOIO);
2384                r10_bio->master_bio = mbio;
2385                r10_bio->sectors = (mbio->bi_size >> 9)
2386                        - sectors_handled;
2387                r10_bio->state = 0;
2388                set_bit(R10BIO_ReadError,
2389                        &r10_bio->state);
2390                r10_bio->mddev = mddev;
2391                r10_bio->sector = mbio->bi_sector
2392                        + sectors_handled;
2393
2394                goto read_more;
2395        } else
2396                generic_make_request(bio);
2397}
2398
2399static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2400{
2401        /* Some sort of write request has finished and it
2402         * succeeded in writing where we thought there was a
2403         * bad block.  So forget the bad block.
2404         * Or possibly it failed and we need to record
2405         * a bad block.
2406         */
2407        int m;
2408        struct md_rdev *rdev;
2409
2410        if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2411            test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2412                for (m = 0; m < conf->copies; m++) {
2413                        int dev = r10_bio->devs[m].devnum;
2414                        rdev = conf->mirrors[dev].rdev;
2415                        if (r10_bio->devs[m].bio == NULL)
2416                                continue;
2417                        if (test_bit(BIO_UPTODATE,
2418                                     &r10_bio->devs[m].bio->bi_flags)) {
2419                                rdev_clear_badblocks(
2420                                        rdev,
2421                                        r10_bio->devs[m].addr,
2422                                        r10_bio->sectors);
2423                        } else {
2424                                if (!rdev_set_badblocks(
2425                                            rdev,
2426                                            r10_bio->devs[m].addr,
2427                                            r10_bio->sectors, 0))
2428                                        md_error(conf->mddev, rdev);
2429                        }
2430                        rdev = conf->mirrors[dev].replacement;
2431                        if (r10_bio->devs[m].repl_bio == NULL)
2432                                continue;
2433                        if (test_bit(BIO_UPTODATE,
2434                                     &r10_bio->devs[m].repl_bio->bi_flags)) {
2435                                rdev_clear_badblocks(
2436                                        rdev,
2437                                        r10_bio->devs[m].addr,
2438                                        r10_bio->sectors);
2439                        } else {
2440                                if (!rdev_set_badblocks(
2441                                            rdev,
2442                                            r10_bio->devs[m].addr,
2443                                            r10_bio->sectors, 0))
2444                                        md_error(conf->mddev, rdev);
2445                        }
2446                }
2447                put_buf(r10_bio);
2448        } else {
2449                for (m = 0; m < conf->copies; m++) {
2450                        int dev = r10_bio->devs[m].devnum;
2451                        struct bio *bio = r10_bio->devs[m].bio;
2452                        rdev = conf->mirrors[dev].rdev;
2453                        if (bio == IO_MADE_GOOD) {
2454                                rdev_clear_badblocks(
2455                                        rdev,
2456                                        r10_bio->devs[m].addr,
2457                                        r10_bio->sectors);
2458                                rdev_dec_pending(rdev, conf->mddev);
2459                        } else if (bio != NULL &&
2460                                   !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2461                                if (!narrow_write_error(r10_bio, m)) {
2462                                        md_error(conf->mddev, rdev);
2463                                        set_bit(R10BIO_Degraded,
2464                                                &r10_bio->state);
2465                                }
2466                                rdev_dec_pending(rdev, conf->mddev);
2467                        }
2468                        bio = r10_bio->devs[m].repl_bio;
2469                        rdev = conf->mirrors[dev].replacement;
2470                        if (rdev && bio == IO_MADE_GOOD) {
2471                                rdev_clear_badblocks(
2472                                        rdev,
2473                                        r10_bio->devs[m].addr,
2474                                        r10_bio->sectors);
2475                                rdev_dec_pending(rdev, conf->mddev);
2476                        }
2477                }
2478                if (test_bit(R10BIO_WriteError,
2479                             &r10_bio->state))
2480                        close_write(r10_bio);
2481                raid_end_bio_io(r10_bio);
2482        }
2483}
2484
2485static void raid10d(struct mddev *mddev)
2486{
2487        struct r10bio *r10_bio;
2488        unsigned long flags;
2489        struct r10conf *conf = mddev->private;
2490        struct list_head *head = &conf->retry_list;
2491        struct blk_plug plug;
2492
2493        md_check_recovery(mddev);
2494
2495        blk_start_plug(&plug);
2496        for (;;) {
2497
2498                flush_pending_writes(conf);
2499
2500                spin_lock_irqsave(&conf->device_lock, flags);
2501                if (list_empty(head)) {
2502                        spin_unlock_irqrestore(&conf->device_lock, flags);
2503                        break;
2504                }
2505                r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2506                list_del(head->prev);
2507                conf->nr_queued--;
2508                spin_unlock_irqrestore(&conf->device_lock, flags);
2509
2510                mddev = r10_bio->mddev;
2511                conf = mddev->private;
2512                if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2513                    test_bit(R10BIO_WriteError, &r10_bio->state))
2514                        handle_write_completed(conf, r10_bio);
2515                else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2516                        sync_request_write(mddev, r10_bio);
2517                else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2518                        recovery_request_write(mddev, r10_bio);
2519                else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2520                        handle_read_error(mddev, r10_bio);
2521                else {
2522                        /* just a partial read to be scheduled from a
2523                         * separate context
2524                         */
2525                        int slot = r10_bio->read_slot;
2526                        generic_make_request(r10_bio->devs[slot].bio);
2527                }
2528
2529                cond_resched();
2530                if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2531                        md_check_recovery(mddev);
2532        }
2533        blk_finish_plug(&plug);
2534}
2535
2536
2537static int init_resync(struct r10conf *conf)
2538{
2539        int buffs;
2540        int i;
2541
2542        buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2543        BUG_ON(conf->r10buf_pool);
2544        conf->have_replacement = 0;
2545        for (i = 0; i < conf->raid_disks; i++)
2546                if (conf->mirrors[i].replacement)
2547                        conf->have_replacement = 1;
2548        conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2549        if (!conf->r10buf_pool)
2550                return -ENOMEM;
2551        conf->next_resync = 0;
2552        return 0;
2553}
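/*
 * Editorial note, not in the original source: with the resync macros
 * defined near the top of this file, buffs works out to
 * RESYNC_WINDOW / RESYNC_BLOCK_SIZE == (1024*1024) / (64*1024) == 16,
 * i.e. the mempool guarantees sixteen 64K resync buffers (roughly 1MB)
 * even under memory pressure.
 */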
2554
2555/*
2556 * perform a "sync" on one "block"
2557 *
2558 * We need to make sure that no normal I/O request - particularly write
2559 * requests - conflict with active sync requests.
2560 *
2561 * This is achieved by tracking pending requests and a 'barrier' concept
2562 * that can be installed to exclude normal IO requests.
2563 *
2564 * Resync and recovery are handled very differently.
2565 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2566 *
2567 * For resync, we iterate over virtual addresses, read all copies,
2568 * and update if there are differences.  If only one copy is live,
2569 * skip it.
2570 * For recovery, we iterate over physical addresses, read a good
2571 * value for each non-in_sync drive, and over-write.
2572 *
2573 * So, for recovery we may have several outstanding complex requests for a
2574 * given address, one for each out-of-sync device.  We model this by allocating
2575 * a number of r10_bio structures, one for each out-of-sync device.
2576 * As we set up these structures, we collect all bios together into a list
2577 * which we then process collectively to add pages, and then process again
2578 * to pass to generic_make_request.
2579 *
2580 * The r10_bio structures are linked using a borrowed master_bio pointer.
2581 * This link is counted in ->remaining.  When the r10_bio that points to NULL
2582 * has its remaining count decremented to 0, the whole complex operation
2583 * is complete.
2584 *
2585 */
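/*
 * Illustrative sketch of the chain described above (editorial note, not
 * in the original source): recovering two out-of-sync devices at one
 * virtual address builds something like
 *
 *      r10_bio_B->master_bio = (struct bio *)r10_bio_A;
 *      r10_bio_A->master_bio = NULL;    <-- primary of the chain
 *
 * and end_sync_request() walks the chain through master_bio, calling
 * md_done_sync() only once the r10_bio whose master_bio is NULL has had
 * its 'remaining' count drop to zero.
 */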
2586
2587static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
2588                             int *skipped, int go_faster)
2589{
2590        struct r10conf *conf = mddev->private;
2591        struct r10bio *r10_bio;
2592        struct bio *biolist = NULL, *bio;
2593        sector_t max_sector, nr_sectors;
2594        int i;
2595        int max_sync;
2596        sector_t sync_blocks;
2597        sector_t sectors_skipped = 0;
2598        int chunks_skipped = 0;
2599
2600        if (!conf->r10buf_pool)
2601                if (init_resync(conf))
2602                        return 0;
2603
2604 skipped:
2605        max_sector = mddev->dev_sectors;
2606        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2607                max_sector = mddev->resync_max_sectors;
2608        if (sector_nr >= max_sector) {
2609                /* If we aborted, we need to abort the
2610                 * sync on the 'current' bitmap chunks (there can
2611                 * be several when recovering multiple devices),
2612                 * as we may have started syncing them but not finished.
2613                 * We can find the current address in
2614                 * mddev->curr_resync, but for recovery,
2615                 * we need to convert that to several
2616                 * virtual addresses.
2617                 */
2618                if (mddev->curr_resync < max_sector) { /* aborted */
2619                        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2620                                bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2621                                                &sync_blocks, 1);
2622                        else for (i=0; i<conf->raid_disks; i++) {
2623                                sector_t sect =
2624                                        raid10_find_virt(conf, mddev->curr_resync, i);
2625                                bitmap_end_sync(mddev->bitmap, sect,
2626                                                &sync_blocks, 1);
2627                        }
2628                } else {
2629                        /* completed sync */
2630                        if ((!mddev->bitmap || conf->fullsync)
2631                            && conf->have_replacement
2632                            && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2633                                /* Completed a full sync so the replacements
2634                                 * are now fully recovered.
2635                                 */
2636                                for (i = 0; i < conf->raid_disks; i++)
2637                                        if (conf->mirrors[i].replacement)
2638                                                conf->mirrors[i].replacement
2639                                                        ->recovery_offset
2640                                                        = MaxSector;
2641                        }
2642                        conf->fullsync = 0;
2643                }
2644                bitmap_close_sync(mddev->bitmap);
2645                close_sync(conf);
2646                *skipped = 1;
2647                return sectors_skipped;
2648        }
2649        if (chunks_skipped >= conf->raid_disks) {
2650                /* if there has been nothing to do on any drive,
2651                 * then there is nothing to do at all.
2652                 */
2653                *skipped = 1;
2654                return (max_sector - sector_nr) + sectors_skipped;
2655        }
2656
2657        if (max_sector > mddev->resync_max)
2658                max_sector = mddev->resync_max; /* Don't do IO beyond here */
2659
2660        /* make sure whole request will fit in a chunk - if chunks
2661         * are meaningful
2662         */
2663        if (conf->near_copies < conf->raid_disks &&
2664            max_sector > (sector_nr | conf->chunk_mask))
2665                max_sector = (sector_nr | conf->chunk_mask) + 1;
2666        /*
2667         * If there is non-resync activity waiting for us then
2668         * put in a delay to throttle resync.
2669         */
2670        if (!go_faster && conf->nr_waiting)
2671                msleep_interruptible(1000);
2672
2673        /* Again, very different code for resync and recovery.
2674         * Both must result in an r10bio with a list of bios that
2675         * have bi_end_io, bi_sector, bi_bdev set,
2676         * and bi_private set to the r10bio.
2677         * For recovery, we may actually create several r10bios
2678         * with 2 bios in each, that correspond to the bios in the main one.
2679         * In this case, the subordinate r10bios link back through a
2680         * borrowed master_bio pointer, and the counter in the master
2681         * includes a ref from each subordinate.
2682         */
2683        /* First, we decide what to do and set ->bi_end_io
2684         * to end_sync_read if we want to read, and
2685         * end_sync_write if we will want to write.
2686         */
2687
2688        max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
2689        if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2690                /* recovery... the complicated one */
2691                int j;
2692                r10_bio = NULL;
2693
2694                for (i=0 ; i<conf->raid_disks; i++) {
2695                        int still_degraded;
2696                        struct r10bio *rb2;
2697                        sector_t sect;
2698                        int must_sync;
2699                        int any_working;
2700                        struct mirror_info *mirror = &conf->mirrors[i];
2701
2702                        if ((mirror->rdev == NULL ||
2703                             test_bit(In_sync, &mirror->rdev->flags))
2704                            &&
2705                            (mirror->replacement == NULL ||
2706                             test_bit(Faulty,
2707                                      &mirror->replacement->flags)))
2708                                continue;
2709
2710                        still_degraded = 0;
2711                        /* want to reconstruct this device */
2712                        rb2 = r10_bio;
2713                        sect = raid10_find_virt(conf, sector_nr, i);
2714                        /* Unless we are doing a full sync or a replacement,
2715                         * we only need to recover the block if it is set in
2716                         * the bitmap
2717                         */
2718                        must_sync = bitmap_start_sync(mddev->bitmap, sect,
2719                                                      &sync_blocks, 1);
2720                        if (sync_blocks < max_sync)
2721                                max_sync = sync_blocks;
2722                        if (!must_sync &&
2723                            mirror->replacement == NULL &&
2724                            !conf->fullsync) {
2725                                /* yep, skip the sync_blocks here, but don't assume
2726                                 * that there will never be anything to do here
2727                                 */
2728                                chunks_skipped = -1;
2729                                continue;
2730                        }
2731
2732                        r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
2733                        raise_barrier(conf, rb2 != NULL);
2734                        atomic_set(&r10_bio->remaining, 0);
2735
2736                        r10_bio->master_bio = (struct bio*)rb2;
2737                        if (rb2)
2738                                atomic_inc(&rb2->remaining);
2739                        r10_bio->mddev = mddev;
2740                        set_bit(R10BIO_IsRecover, &r10_bio->state);
2741                        r10_bio->sector = sect;
2742
2743                        raid10_find_phys(conf, r10_bio);
2744
2745                        /* Need to check if the array will still be
2746                         * degraded
2747                         */
2748                        for (j=0; j<conf->raid_disks; j++)
2749                                if (conf->mirrors[j].rdev == NULL ||
2750                                    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
2751                                        still_degraded = 1;
2752                                        break;
2753                                }
2754
2755                        must_sync = bitmap_start_sync(mddev->bitmap, sect,
2756                                                      &sync_blocks, still_degraded);
2757
2758                        any_working = 0;
2759                        for (j=0; j<conf->copies;j++) {
2760                                int k;
2761                                int d = r10_bio->devs[j].devnum;
2762                                sector_t from_addr, to_addr;
2763                                struct md_rdev *rdev;
2764                                sector_t sector, first_bad;
2765                                int bad_sectors;
2766                                if (!conf->mirrors[d].rdev ||
2767                                    !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
2768                                        continue;
2769                                /* This is where we read from */
2770                                any_working = 1;
2771                                rdev = conf->mirrors[d].rdev;
2772                                sector = r10_bio->devs[j].addr;
2773
2774                                if (is_badblock(rdev, sector, max_sync,
2775                                                &first_bad, &bad_sectors)) {
2776                                        if (first_bad > sector)
2777                                                max_sync = first_bad - sector;
2778                                        else {
2779                                                bad_sectors -= (sector
2780                                                                - first_bad);
2781                                                if (max_sync > bad_sectors)
2782                                                        max_sync = bad_sectors;
2783                                                continue;
2784                                        }
2785                                }
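                                /* Illustrative numbers (assumed): a read at
                                 * sector 1000 with max_sync 128 and a bad
                                 * range starting at sector 1040 is clipped
                                 * to 40 sectors, stopping short of the bad
                                 * area.  If the bad range already covers
                                 * sector 1000 (say 984..1047), max_sync is
                                 * clipped to the 48 still-bad sectors and
                                 * this copy is skipped, so the next request
                                 * starts just past the bad range.
                                 */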
2786                                bio = r10_bio->devs[0].bio;
2787                                bio->bi_next = biolist;
2788                                biolist = bio;
2789                                bio->bi_private = r10_bio;
2790                                bio->bi_end_io = end_sync_read;
2791                                bio->bi_rw = READ;
2792                                from_addr = r10_bio->devs[j].addr;
2793                                bio->bi_sector = from_addr + rdev->data_offset;
2794                                bio->bi_bdev = rdev->bdev;
2795                                atomic_inc(&rdev->nr_pending);
2796                                /* and we write to 'i' (if not in_sync) */
2797
2798                                for (k=0; k<conf->copies; k++)
2799                                        if (r10_bio->devs[k].devnum == i)
2800                                                break;
2801                                BUG_ON(k == conf->copies);
2802                                to_addr = r10_bio->devs[k].addr;
2803                                r10_bio->devs[0].devnum = d;
2804                                r10_bio->devs[0].addr = from_addr;
2805                                r10_bio->devs[1].devnum = i;
2806                                r10_bio->devs[1].addr = to_addr;
2807
2808                                rdev = mirror->rdev;
2809                                if (!test_bit(In_sync, &rdev->flags)) {
2810                                        bio = r10_bio->devs[1].bio;
2811                                        bio->bi_next = biolist;
2812                                        biolist = bio;
2813                                        bio->bi_private = r10_bio;
2814                                        bio->bi_end_io = end_sync_write;
2815                                        bio->bi_rw = WRITE;
2816                                        bio->bi_sector = to_addr
2817                                                + rdev->data_offset;
2818                                        bio->bi_bdev = rdev->bdev;
2819                                        atomic_inc(&r10_bio->remaining);
2820                                } else
2821                                        r10_bio->devs[1].bio->bi_end_io = NULL;
2822
2823                                /* and maybe write to replacement */
2824                                bio = r10_bio->devs[1].repl_bio;
2825                                if (bio)
2826                                        bio->bi_end_io = NULL;
2827                                rdev = mirror->replacement;
2828                                /* Note: if rdev != NULL, then bio
2829                                 * cannot be NULL as r10buf_pool_alloc will
2830                                 * have allocated it.
2831                                 * So the second test here is pointless.
2832                                 * But it keeps semantic-checkers happy, and
2833                                 * this comment keeps human reviewers
2834                                 * happy.
2835                                 */
2836                                if (rdev == NULL || bio == NULL ||
2837                                    test_bit(Faulty, &rdev->flags))
2838                                        break;
2839                                bio->bi_next = biolist;
2840                                biolist = bio;
2841                                bio->bi_private = r10_bio;
2842                                bio->bi_end_io = end_sync_write;
2843                                bio->bi_rw = WRITE;
2844                                bio->bi_sector = to_addr + rdev->data_offset;
2845                                bio->bi_bdev = rdev->bdev;
2846                                atomic_inc(&r10_bio->remaining);
2847                                break;
2848                        }
2849                        if (j == conf->copies) {
2850                                /* Cannot recover, so abort the recovery or
2851                                 * record a bad block */
2852                                put_buf(r10_bio);
2853                                if (rb2)
2854                                        atomic_dec(&rb2->remaining);
2855                                r10_bio = rb2;
2856                                if (any_working) {
2857                                        /* problem is that there are bad blocks
2858                                         * on other device(s)
2859                                         */
2860                                        int k;
2861                                        for (k = 0; k < conf->copies; k++)
2862                                                if (r10_bio->devs[k].devnum == i)
2863                                                        break;
2864                                        if (!test_bit(In_sync,
2865                                                      &mirror->rdev->flags)
2866                                            && !rdev_set_badblocks(
2867                                                    mirror->rdev,
2868                                                    r10_bio->devs[k].addr,
2869                                                    max_sync, 0))
2870                                                any_working = 0;
2871                                        if (mirror->replacement &&
2872                                            !rdev_set_badblocks(
2873                                                    mirror->replacement,
2874                                                    r10_bio->devs[k].addr,
2875                                                    max_sync, 0))
2876                                                any_working = 0;
2877                                }
2878                                if (!any_working)  {
2879                                        if (!test_and_set_bit(MD_RECOVERY_INTR,
2880                                                              &mddev->recovery))
2881                                                printk(KERN_INFO "md/raid10:%s: insufficient "
2882                                                       "working devices for recovery.\n",
2883                                                       mdname(mddev));
2884                                        mirror->recovery_disabled
2885                                                = mddev->recovery_disabled;
2886                                }
2887                                break;
2888                        }
2889                }
2890                if (biolist == NULL) {
2891                        while (r10_bio) {
2892                                struct r10bio *rb2 = r10_bio;
2893                                r10_bio = (struct r10bio*) rb2->master_bio;
2894                                rb2->master_bio = NULL;
2895                                put_buf(rb2);
2896                        }
2897                        goto giveup;
2898                }
2899        } else {
2900                /* resync. Schedule a read for every block at this virt offset */
2901                int count = 0;
2902
2903                bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2904
2905                if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2906                                       &sync_blocks, mddev->degraded) &&
2907                    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
2908                                                 &mddev->recovery)) {
2909                        /* We can skip this block */
2910                        *skipped = 1;
2911                        return sync_blocks + sectors_skipped;
2912                }
2913                if (sync_blocks < max_sync)
2914                        max_sync = sync_blocks;
2915                r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
2916
2917                r10_bio->mddev = mddev;
2918                atomic_set(&r10_bio->remaining, 0);
2919                raise_barrier(conf, 0);
2920                conf->next_resync = sector_nr;
2921
2922                r10_bio->master_bio = NULL;
2923                r10_bio->sector = sector_nr;
2924                set_bit(R10BIO_IsSync, &r10_bio->state);
2925                raid10_find_phys(conf, r10_bio);
2926                r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
2927
2928                for (i=0; i<conf->copies; i++) {
2929                        int d = r10_bio->devs[i].devnum;
2930                        sector_t first_bad, sector;
2931                        int bad_sectors;
2932
2933                        if (r10_bio->devs[i].repl_bio)
2934                                r10_bio->devs[i].repl_bio->bi_end_io = NULL;
2935
2936                        bio = r10_bio->devs[i].bio;
2937                        bio->bi_end_io = NULL;
2938                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
2939                        if (conf->mirrors[d].rdev == NULL ||
2940                            test_bit(Faulty, &conf->mirrors[d].rdev->flags))
2941                                continue;
2942                        sector = r10_bio->devs[i].addr;
2943                        if (is_badblock(conf->mirrors[d].rdev,
2944                                        sector, max_sync,
2945                                        &first_bad, &bad_sectors)) {
2946                                if (first_bad > sector)
2947                                        max_sync = first_bad - sector;
2948                                else {
2949                                        bad_sectors -= (sector - first_bad);
2950                                        if (max_sync > bad_sectors)
2951                                                max_sync = bad_sectors;
2952                                        continue;
2953                                }
2954                        }
2955                        atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2956                        atomic_inc(&r10_bio->remaining);
2957                        bio->bi_next = biolist;
2958                        biolist = bio;
2959                        bio->bi_private = r10_bio;
2960                        bio->bi_end_io = end_sync_read;
2961                        bio->bi_rw = READ;
2962                        bio->bi_sector = sector +
2963                                conf->mirrors[d].rdev->data_offset;
2964                        bio->bi_bdev = conf->mirrors[d].rdev->bdev;
2965                        count++;
2966
2967                        if (conf->mirrors[d].replacement == NULL ||
2968                            test_bit(Faulty,
2969                                     &conf->mirrors[d].replacement->flags))
2970                                continue;
2971
2972                        /* Need to set up for writing to the replacement */
2973                        bio = r10_bio->devs[i].repl_bio;
2974                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
2975
2976                        sector = r10_bio->devs[i].addr;
2977                        atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2978                        bio->bi_next = biolist;
2979                        biolist = bio;
2980                        bio->bi_private = r10_bio;
2981                        bio->bi_end_io = end_sync_write;
2982                        bio->bi_rw = WRITE;
2983                        bio->bi_sector = sector +
2984                                conf->mirrors[d].replacement->data_offset;
2985                        bio->bi_bdev = conf->mirrors[d].replacement->bdev;
2986                        count++;
2987                }
2988
2989                if (count < 2) {
2990                        for (i=0; i<conf->copies; i++) {
2991                                int d = r10_bio->devs[i].devnum;
2992                                if (r10_bio->devs[i].bio->bi_end_io)
2993                                        rdev_dec_pending(conf->mirrors[d].rdev,
2994                                                         mddev);
2995                                if (r10_bio->devs[i].repl_bio &&
2996                                    r10_bio->devs[i].repl_bio->bi_end_io)
2997                                        rdev_dec_pending(
2998                                                conf->mirrors[d].replacement,
2999                                                mddev);
3000                        }
3001                        put_buf(r10_bio);
3002                        biolist = NULL;
3003                        goto giveup;
3004                }
3005        }
3006
3007        for (bio = biolist; bio ; bio=bio->bi_next) {
3008
3009                bio->bi_flags &= ~(BIO_POOL_MASK - 1);
3010                if (bio->bi_end_io)
3011                        bio->bi_flags |= 1 << BIO_UPTODATE;
3012                bio->bi_vcnt = 0;
3013                bio->bi_idx = 0;
3014                bio->bi_phys_segments = 0;
3015                bio->bi_size = 0;
3016        }
3017
3018        nr_sectors = 0;
3019        if (sector_nr + max_sync < max_sector)
3020                max_sector = sector_nr + max_sync;
3021        do {
3022                struct page *page;
3023                int len = PAGE_SIZE;
3024                if (sector_nr + (len>>9) > max_sector)
3025                        len = (max_sector - sector_nr) << 9;
3026                if (len == 0)
3027                        break;
3028                for (bio= biolist ; bio ; bio=bio->bi_next) {
3029                        struct bio *bio2;
3030                        page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
3031                        if (bio_add_page(bio, page, len, 0))
3032                                continue;
3033
3034                        /* stop here */
3035                        bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
3036                        for (bio2 = biolist;
3037                             bio2 && bio2 != bio;
3038                             bio2 = bio2->bi_next) {
3039                                /* remove last page from this bio */
3040                                bio2->bi_vcnt--;
3041                                bio2->bi_size -= len;
3042                                bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
3043                        }
3044                        goto bio_full;
3045                }
3046                nr_sectors += len>>9;
3047                sector_nr += len>>9;
3048        } while (biolist->bi_vcnt < RESYNC_PAGES);
3049 bio_full:
3050        r10_bio->sectors = nr_sectors;
3051
3052        while (biolist) {
3053                bio = biolist;
3054                biolist = biolist->bi_next;
3055
3056                bio->bi_next = NULL;
3057                r10_bio = bio->bi_private;
3058                r10_bio->sectors = nr_sectors;
3059
3060                if (bio->bi_end_io == end_sync_read) {
3061                        md_sync_acct(bio->bi_bdev, nr_sectors);
3062                        generic_make_request(bio);
3063                }
3064        }
3065
3066        if (sectors_skipped)
3067                /* pretend they weren't skipped; it makes
3068                 * no important difference in this case
3069                 */
3070                md_done_sync(mddev, sectors_skipped, 1);
3071
3072        return sectors_skipped + nr_sectors;
3073 giveup:
3074        /* There is nowhere to write: either all non-sync
3075         * drives are failed or in resync, or all drives
3076         * have a bad block, so try the next chunk...
3077         */
3078        if (sector_nr + max_sync < max_sector)
3079                max_sector = sector_nr + max_sync;
3080
3081        sectors_skipped += (max_sector - sector_nr);
3082        chunks_skipped ++;
3083        sector_nr = max_sector;
3084        goto skipped;
3085}
3086
3087static sector_t
3088raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3089{
3090        sector_t size;
3091        struct r10conf *conf = mddev->private;
3092
3093        if (!raid_disks)
3094                raid_disks = conf->raid_disks;
3095        if (!sectors)
3096                sectors = conf->dev_sectors;
3097
3098        size = sectors >> conf->chunk_shift;
3099        sector_div(size, conf->far_copies);
3100        size = size * raid_disks;
3101        sector_div(size, conf->near_copies);
3102
3103        return size << conf->chunk_shift;
3104}
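
/*
 * Worked example for raid10_size() above (illustrative, numbers assumed):
 * a 4-device "n2" array (near_copies = 2, far_copies = 1) with 64KiB chunks
 * (chunk_shift = 7) and 1048576 sectors (512MiB) per device:
 *
 *     1048576 >> 7            =  8192 chunks per device
 *     8192 / far_copies (1)   =  8192
 *     8192 * raid_disks (4)   = 32768
 *     32768 / near_copies (2) = 16384 chunks
 *     16384 << 7              = 2097152 sectors, i.e. 1GiB
 *
 * which is half of the 4 x 512MiB of raw space, as expected when every
 * chunk is stored twice.
 */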
3105
3106
3107static struct r10conf *setup_conf(struct mddev *mddev)
3108{
3109        struct r10conf *conf = NULL;
3110        int nc, fc, fo;
3111        sector_t stride, size;
3112        int err = -EINVAL;
3113
3114        if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
3115            !is_power_of_2(mddev->new_chunk_sectors)) {
3116                printk(KERN_ERR "md/raid10:%s: chunk size must be "
3117                       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
3118                       mdname(mddev), PAGE_SIZE);
3119                goto out;
3120        }
3121
3122        nc = mddev->new_layout & 255;
3123        fc = (mddev->new_layout >> 8) & 255;
3124        fo = mddev->new_layout & (1<<16);
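
        /*
         * Example (illustrative): the common "n2" layout word 0x102 decodes
         * to nc = 0x102 & 255 = 2, fc = (0x102 >> 8) & 255 = 1, fo = 0;
         * raid10_takeover_raid0() below builds exactly this value as
         * (1 << 8) + 2.
         */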
3125
3126        if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
3127            (mddev->new_layout >> 17)) {
3128                printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3129                       mdname(mddev), mddev->new_layout);
3130                goto out;
3131        }
3132
3133        err = -ENOMEM;
3134        conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3135        if (!conf)
3136                goto out;
3137
3138        conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
3139                                GFP_KERNEL);
3140        if (!conf->mirrors)
3141                goto out;
3142
3143        conf->tmppage = alloc_page(GFP_KERNEL);
3144        if (!conf->tmppage)
3145                goto out;
3146
3147
3148        conf->raid_disks = mddev->raid_disks;
3149        conf->near_copies = nc;
3150        conf->far_copies = fc;
3151        conf->copies = nc*fc;
3152        conf->far_offset = fo;
3153        conf->chunk_mask = mddev->new_chunk_sectors - 1;
3154        conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
3155
3156        conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
3157                                           r10bio_pool_free, conf);
3158        if (!conf->r10bio_pool)
3159                goto out;
3160
3161        size = mddev->dev_sectors >> conf->chunk_shift;
3162        sector_div(size, fc);
3163        size = size * conf->raid_disks;
3164        sector_div(size, nc);
3165        /* 'size' is now the number of chunks in the array */
3166        /* calculate "used chunks per device" in 'stride' */
3167        stride = size * conf->copies;
3168
3169        /* We need to round up when dividing by raid_disks to
3170         * get the stride size.
3171         */
3172        stride += conf->raid_disks - 1;
3173        sector_div(stride, conf->raid_disks);
3174
3175        conf->dev_sectors = stride << conf->chunk_shift;
3176
3177        if (fo)
3178                stride = 1;
3179        else
3180                sector_div(stride, fc);
3181        conf->stride = stride << conf->chunk_shift;
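
        /*
         * Worked example (illustrative, numbers assumed): an "f2" array
         * (nc = 1, fc = 2, fo = 0) of 4 devices, 1048576 sectors each,
         * 64KiB chunks (chunk_shift = 7):
         *     size   = (1048576 >> 7) / 2 * 4 / 1 = 16384 chunks in the array
         *     stride = 16384 * 2 / 4 (rounded up) =  8192 chunks per device
         *     dev_sectors = 8192 << 7             = 1048576 (whole device)
         *     stride / fc = 4096 chunks, so conf->stride = 524288 sectors:
         *     the far copy section starts halfway down each device.
         */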
3182
3183
3184        spin_lock_init(&conf->device_lock);
3185        INIT_LIST_HEAD(&conf->retry_list);
3186
3187        spin_lock_init(&conf->resync_lock);
3188        init_waitqueue_head(&conf->wait_barrier);
3189
3190        conf->thread = md_register_thread(raid10d, mddev, NULL);
3191        if (!conf->thread)
3192                goto out;
3193
3194        conf->mddev = mddev;
3195        return conf;
3196
3197 out:
3198        printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3199               mdname(mddev));
3200        if (conf) {
3201                if (conf->r10bio_pool)
3202                        mempool_destroy(conf->r10bio_pool);
3203                kfree(conf->mirrors);
3204                safe_put_page(conf->tmppage);
3205                kfree(conf);
3206        }
3207        return ERR_PTR(err);
3208}
3209
3210static int run(struct mddev *mddev)
3211{
3212        struct r10conf *conf;
3213        int i, disk_idx, chunk_size;
3214        struct mirror_info *disk;
3215        struct md_rdev *rdev;
3216        sector_t size;
3217
3218        /*
3219         * copy the already verified devices into our private RAID10
3220         * bookkeeping area. [whatever we allocate in run()
3221         * should be freed in stop()]
3222         */
3223
3224        if (mddev->private == NULL) {
3225                conf = setup_conf(mddev);
3226                if (IS_ERR(conf))
3227                        return PTR_ERR(conf);
3228                mddev->private = conf;
3229        }
3230        conf = mddev->private;
3231        if (!conf)
3232                goto out;
3233
3234        mddev->thread = conf->thread;
3235        conf->thread = NULL;
3236
3237        chunk_size = mddev->chunk_sectors << 9;
3238        blk_queue_io_min(mddev->queue, chunk_size);
3239        if (conf->raid_disks % conf->near_copies)
3240                blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
3241        else
3242                blk_queue_io_opt(mddev->queue, chunk_size *
3243                                 (conf->raid_disks / conf->near_copies));
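
        /*
         * Example (illustrative): 4 devices, near_copies = 2, 64KiB chunks:
         * io_min is one chunk (64KiB) and, since 4 % 2 == 0, io_opt is
         * 64KiB * (4 / 2) = 128KiB, i.e. one stripe of distinct data.
         */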
3244
3245        list_for_each_entry(rdev, &mddev->disks, same_set) {
3246
3247                disk_idx = rdev->raid_disk;
3248                if (disk_idx >= conf->raid_disks
3249                    || disk_idx < 0)
3250                        continue;
3251                disk = conf->mirrors + disk_idx;
3252
3253                if (test_bit(Replacement, &rdev->flags)) {
3254                        if (disk->replacement)
3255                                goto out_free_conf;
3256                        disk->replacement = rdev;
3257                } else {
3258                        if (disk->rdev)
3259                                goto out_free_conf;
3260                        disk->rdev = rdev;
3261                }
3262
3263                disk_stack_limits(mddev->gendisk, rdev->bdev,
3264                                  rdev->data_offset << 9);
3265                /* as we don't honour merge_bvec_fn, we must never risk
3266                 * violating it, so limit max_segments to 1, lying
3267                 * within a single page.
3268                 */
3269                if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
3270                        blk_queue_max_segments(mddev->queue, 1);
3271                        blk_queue_segment_boundary(mddev->queue,
3272                                                   PAGE_CACHE_SIZE - 1);
3273                }
3274
3275                disk->head_position = 0;
3276        }
3277        /* need to check that every block has at least one working mirror */
3278        if (!enough(conf, -1)) {
3279                printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
3280                       mdname(mddev));
3281                goto out_free_conf;
3282        }
3283
3284        mddev->degraded = 0;
3285        for (i = 0; i < conf->raid_disks; i++) {
3286
3287                disk = conf->mirrors + i;
3288
3289                if (!disk->rdev && disk->replacement) {
3290                        /* The replacement is all we have - use it */
3291                        disk->rdev = disk->replacement;
3292                        disk->replacement = NULL;
3293                        clear_bit(Replacement, &disk->rdev->flags);
3294                }
3295
3296                if (!disk->rdev ||
3297                    !test_bit(In_sync, &disk->rdev->flags)) {
3298                        disk->head_position = 0;
3299                        mddev->degraded++;
3300                        if (disk->rdev)
3301                                conf->fullsync = 1;
3302                }
3303                disk->recovery_disabled = mddev->recovery_disabled - 1;
3304        }
3305
3306        if (mddev->recovery_cp != MaxSector)
3307                printk(KERN_NOTICE "md/raid10:%s: not clean"
3308                       " -- starting background reconstruction\n",
3309                       mdname(mddev));
3310        printk(KERN_INFO
3311                "md/raid10:%s: active with %d out of %d devices\n",
3312                mdname(mddev), conf->raid_disks - mddev->degraded,
3313                conf->raid_disks);
3314        /*
3315         * Ok, everything is just fine now
3316         */
3317        mddev->dev_sectors = conf->dev_sectors;
3318        size = raid10_size(mddev, 0, 0);
3319        md_set_array_sectors(mddev, size);
3320        mddev->resync_max_sectors = size;
3321
3322        mddev->queue->backing_dev_info.congested_fn = raid10_congested;
3323        mddev->queue->backing_dev_info.congested_data = mddev;
3324
3325        /* Calculate max read-ahead size.
3326         * We need to readahead at least twice a whole stripe....
3327         * maybe...
3328         */
3329        {
3330                int stripe = conf->raid_disks *
3331                        ((mddev->chunk_sectors << 9) / PAGE_SIZE);
3332                stripe /= conf->near_copies;
3333                if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
3334                        mddev->queue->backing_dev_info.ra_pages = 2* stripe;
3335        }
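
        /*
         * Example (illustrative, assuming 4KiB pages): 4 devices, 64KiB
         * chunks, near_copies = 2 gives stripe = 4 * 16 / 2 = 32 pages,
         * so ra_pages is raised to at least 64 pages (256KiB).
         */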
3336
3337        if (conf->near_copies < conf->raid_disks)
3338                blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
3339
3340        if (md_integrity_register(mddev))
3341                goto out_free_conf;
3342
3343        return 0;
3344
3345out_free_conf:
3346        md_unregister_thread(&mddev->thread);
3347        if (conf->r10bio_pool)
3348                mempool_destroy(conf->r10bio_pool);
3349        safe_put_page(conf->tmppage);
3350        kfree(conf->mirrors);
3351        kfree(conf);
3352        mddev->private = NULL;
3353out:
3354        return -EIO;
3355}
3356
3357static int stop(struct mddev *mddev)
3358{
3359        struct r10conf *conf = mddev->private;
3360
3361        raise_barrier(conf, 0);
3362        lower_barrier(conf);
3363
3364        md_unregister_thread(&mddev->thread);
3365        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
3366        if (conf->r10bio_pool)
3367                mempool_destroy(conf->r10bio_pool);
3368        kfree(conf->mirrors);
3369        kfree(conf);
3370        mddev->private = NULL;
3371        return 0;
3372}
3373
3374static void raid10_quiesce(struct mddev *mddev, int state)
3375{
3376        struct r10conf *conf = mddev->private;
3377
3378        switch(state) {
3379        case 1:
3380                raise_barrier(conf, 0);
3381                break;
3382        case 0:
3383                lower_barrier(conf);
3384                break;
3385        }
3386}
3387
3388static void *raid10_takeover_raid0(struct mddev *mddev)
3389{
3390        struct md_rdev *rdev;
3391        struct r10conf *conf;
3392
3393        if (mddev->degraded > 0) {
3394                printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
3395                       mdname(mddev));
3396                return ERR_PTR(-EINVAL);
3397        }
3398
3399        /* Set new parameters */
3400        mddev->new_level = 10;
3401        /* new layout: far_copies = 1, near_copies = 2 */
3402        mddev->new_layout = (1<<8) + 2;
3403        mddev->new_chunk_sectors = mddev->chunk_sectors;
3404        mddev->delta_disks = mddev->raid_disks;
3405        mddev->raid_disks *= 2;
3406        /* make sure it will not be marked as dirty */
3407        mddev->recovery_cp = MaxSector;
3408
3409        conf = setup_conf(mddev);
3410        if (!IS_ERR(conf)) {
3411                list_for_each_entry(rdev, &mddev->disks, same_set)
3412                        if (rdev->raid_disk >= 0)
3413                                rdev->new_raid_disk = rdev->raid_disk * 2;
3414                conf->barrier = 1;
3415        }
3416
3417        return conf;
3418}
3419
3420static void *raid10_takeover(struct mddev *mddev)
3421{
3422        struct r0conf *raid0_conf;
3423
3424        /* raid10 can take over:
3425         *  raid0 - providing it has only one zone
3426         */
3427        if (mddev->level == 0) {
3428                /* for raid0 takeover only one zone is supported */
3429                raid0_conf = mddev->private;
3430                if (raid0_conf->nr_strip_zones > 1) {
3431                        printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
3432                               " with more than one zone.\n",
3433                               mdname(mddev));
3434                        return ERR_PTR(-EINVAL);
3435                }
3436                return raid10_takeover_raid0(mddev);
3437        }
3438        return ERR_PTR(-EINVAL);
3439}
3440
3441static struct md_personality raid10_personality =
3442{
3443        .name           = "raid10",
3444        .level          = 10,
3445        .owner          = THIS_MODULE,
3446        .make_request   = make_request,
3447        .run            = run,
3448        .stop           = stop,
3449        .status         = status,
3450        .error_handler  = error,
3451        .hot_add_disk   = raid10_add_disk,
3452        .hot_remove_disk= raid10_remove_disk,
3453        .spare_active   = raid10_spare_active,
3454        .sync_request   = sync_request,
3455        .quiesce        = raid10_quiesce,
3456        .size           = raid10_size,
3457        .takeover       = raid10_takeover,
3458};
3459
3460static int __init raid_init(void)
3461{
3462        return register_md_personality(&raid10_personality);
3463}
3464
3465static void raid_exit(void)
3466{
3467        unregister_md_personality(&raid10_personality);
3468}
3469
3470module_init(raid_init);
3471module_exit(raid_exit);
3472MODULE_LICENSE("GPL");
3473MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
3474MODULE_ALIAS("md-personality-9"); /* RAID10 */
3475MODULE_ALIAS("md-raid10");
3476MODULE_ALIAS("md-level-10");
3477
3478module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
3479