linux/drivers/md/raid1.c
   1/*
   2 * raid1.c : Multiple Devices driver for Linux
   3 *
   4 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
   5 *
   6 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   7 *
   8 * RAID-1 management functions.
   9 *
  10 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
  11 *
   12 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
  13 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
  14 *
  15 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
  16 * bitmapped intelligence in resync:
  17 *
  18 *      - bitmap marked during normal i/o
  19 *      - bitmap used to skip nondirty blocks during sync
  20 *
  21 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
  22 * - persistent bitmap code
  23 *
  24 * This program is free software; you can redistribute it and/or modify
  25 * it under the terms of the GNU General Public License as published by
  26 * the Free Software Foundation; either version 2, or (at your option)
  27 * any later version.
  28 *
  29 * You should have received a copy of the GNU General Public License
  30 * (for example /usr/src/linux/COPYING); if not, write to the Free
  31 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  32 */
  33
  34#include <linux/delay.h>
  35#include <linux/blkdev.h>
  36#include <linux/seq_file.h>
  37#include "md.h"
  38#include "raid1.h"
  39#include "bitmap.h"
  40
  41#define DEBUG 0
  42#if DEBUG
  43#define PRINTK(x...) printk(x)
  44#else
  45#define PRINTK(x...)
  46#endif
  47
  48/*
  49 * Number of guaranteed r1bios in case of extreme VM load:
  50 */
  51#define NR_RAID1_BIOS 256
  52
  53
  54static void unplug_slaves(mddev_t *mddev);
  55
  56static void allow_barrier(conf_t *conf);
  57static void lower_barrier(conf_t *conf);
  58
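     /*
      * Allocation callbacks backing the r1bio mempool (NR_RAID1_BIOS entries
      * are guaranteed above).  On allocation failure the member devices are
      * unplugged so queued IO can complete and release memory before the
      * mempool retries.
      */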
  59static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
  60{
  61        struct pool_info *pi = data;
  62        r1bio_t *r1_bio;
  63        int size = offsetof(r1bio_t, bios[pi->raid_disks]);
  64
  65        /* allocate a r1bio with room for raid_disks entries in the bios array */
  66        r1_bio = kzalloc(size, gfp_flags);
  67        if (!r1_bio && pi->mddev)
  68                unplug_slaves(pi->mddev);
  69
  70        return r1_bio;
  71}
  72
  73static void r1bio_pool_free(void *r1_bio, void *data)
  74{
  75        kfree(r1_bio);
  76}
  77
  78#define RESYNC_BLOCK_SIZE (64*1024)
  79//#define RESYNC_BLOCK_SIZE PAGE_SIZE
  80#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
  81#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
  82#define RESYNC_WINDOW (2048*1024)
  83
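     /*
      * r1buf_pool_alloc() builds the buffer r1bios used for resync and
      * recovery: one bio per raid disk.  For a normal resync only the first
      * bio gets its own pages and the rest share them; for a user-requested
      * check/repair every bio gets private pages so the copies can be
      * compared.
      */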
  84static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
  85{
  86        struct pool_info *pi = data;
  87        struct page *page;
  88        r1bio_t *r1_bio;
  89        struct bio *bio;
  90        int i, j;
  91
  92        r1_bio = r1bio_pool_alloc(gfp_flags, pi);
  93        if (!r1_bio) {
  94                unplug_slaves(pi->mddev);
  95                return NULL;
  96        }
  97
  98        /*
  99         * Allocate bios : 1 for reading, n-1 for writing
 100         */
 101        for (j = pi->raid_disks ; j-- ; ) {
 102                bio = bio_alloc(gfp_flags, RESYNC_PAGES);
 103                if (!bio)
 104                        goto out_free_bio;
 105                r1_bio->bios[j] = bio;
 106        }
 107        /*
 108         * Allocate RESYNC_PAGES data pages and attach them to
 109         * the first bio.
 110         * If this is a user-requested check/repair, allocate
 111         * RESYNC_PAGES for each bio.
 112         */
 113        if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
 114                j = pi->raid_disks;
 115        else
 116                j = 1;
 117        while(j--) {
 118                bio = r1_bio->bios[j];
 119                for (i = 0; i < RESYNC_PAGES; i++) {
 120                        page = alloc_page(gfp_flags);
 121                        if (unlikely(!page))
 122                                goto out_free_pages;
 123
 124                        bio->bi_io_vec[i].bv_page = page;
 125                        bio->bi_vcnt = i+1;
 126                }
 127        }
  128        /* If not a user-requested check/repair, copy the page pointers to all bios */
 129        if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
 130                for (i=0; i<RESYNC_PAGES ; i++)
 131                        for (j=1; j<pi->raid_disks; j++)
 132                                r1_bio->bios[j]->bi_io_vec[i].bv_page =
 133                                        r1_bio->bios[0]->bi_io_vec[i].bv_page;
 134        }
 135
 136        r1_bio->master_bio = NULL;
 137
 138        return r1_bio;
 139
 140out_free_pages:
 141        for (j=0 ; j < pi->raid_disks; j++)
 142                for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
 143                        put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
 144        j = -1;
 145out_free_bio:
 146        while ( ++j < pi->raid_disks )
 147                bio_put(r1_bio->bios[j]);
 148        r1bio_pool_free(r1_bio, data);
 149        return NULL;
 150}
 151
 152static void r1buf_pool_free(void *__r1_bio, void *data)
 153{
 154        struct pool_info *pi = data;
 155        int i,j;
 156        r1bio_t *r1bio = __r1_bio;
 157
 158        for (i = 0; i < RESYNC_PAGES; i++)
 159                for (j = pi->raid_disks; j-- ;) {
 160                        if (j == 0 ||
 161                            r1bio->bios[j]->bi_io_vec[i].bv_page !=
 162                            r1bio->bios[0]->bi_io_vec[i].bv_page)
 163                                safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
 164                }
 165        for (i=0 ; i < pi->raid_disks; i++)
 166                bio_put(r1bio->bios[i]);
 167
 168        r1bio_pool_free(r1bio, data);
 169}
 170
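     /*
      * Drop every per-mirror bio attached to an r1bio.  IO_BLOCKED is a
      * marker, not a real bio, so it must not be bio_put().
      */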
 171static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
 172{
 173        int i;
 174
 175        for (i = 0; i < conf->raid_disks; i++) {
 176                struct bio **bio = r1_bio->bios + i;
 177                if (*bio && *bio != IO_BLOCKED)
 178                        bio_put(*bio);
 179                *bio = NULL;
 180        }
 181}
 182
 183static void free_r1bio(r1bio_t *r1_bio)
 184{
 185        conf_t *conf = r1_bio->mddev->private;
 186
 187        /*
 188         * Wake up any possible resync thread that waits for the device
 189         * to go idle.
 190         */
 191        allow_barrier(conf);
 192
 193        put_all_bios(conf, r1_bio);
 194        mempool_free(r1_bio, conf->r1bio_pool);
 195}
 196
 197static void put_buf(r1bio_t *r1_bio)
 198{
 199        conf_t *conf = r1_bio->mddev->private;
 200        int i;
 201
 202        for (i=0; i<conf->raid_disks; i++) {
 203                struct bio *bio = r1_bio->bios[i];
 204                if (bio->bi_end_io)
 205                        rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
 206        }
 207
 208        mempool_free(r1_bio, conf->r1buf_pool);
 209
 210        lower_barrier(conf);
 211}
 212
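     /*
      * Hand an r1bio to raid1d for retry: queue it on conf->retry_list,
      * then wake any barrier waiters and the md thread.
      */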
 213static void reschedule_retry(r1bio_t *r1_bio)
 214{
 215        unsigned long flags;
 216        mddev_t *mddev = r1_bio->mddev;
 217        conf_t *conf = mddev->private;
 218
 219        spin_lock_irqsave(&conf->device_lock, flags);
 220        list_add(&r1_bio->retry_list, &conf->retry_list);
 221        conf->nr_queued ++;
 222        spin_unlock_irqrestore(&conf->device_lock, flags);
 223
 224        wake_up(&conf->wait_barrier);
 225        md_wakeup_thread(mddev->thread);
 226}
 227
 228/*
 229 * raid_end_bio_io() is called when we have finished servicing a mirrored
 230 * operation and are ready to return a success/failure code to the buffer
 231 * cache layer.
 232 */
 233static void raid_end_bio_io(r1bio_t *r1_bio)
 234{
 235        struct bio *bio = r1_bio->master_bio;
 236
 237        /* if nobody has done the final endio yet, do it now */
 238        if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 239                PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
 240                        (bio_data_dir(bio) == WRITE) ? "write" : "read",
 241                        (unsigned long long) bio->bi_sector,
 242                        (unsigned long long) bio->bi_sector +
 243                                (bio->bi_size >> 9) - 1);
 244
 245                bio_endio(bio,
 246                        test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
 247        }
 248        free_r1bio(r1_bio);
 249}
 250
 251/*
 252 * Update disk head position estimator based on IRQ completion info.
 253 */
 254static inline void update_head_pos(int disk, r1bio_t *r1_bio)
 255{
 256        conf_t *conf = r1_bio->mddev->private;
 257
 258        conf->mirrors[disk].head_position =
 259                r1_bio->sector + (r1_bio->sectors);
 260}
 261
 262static void raid1_end_read_request(struct bio *bio, int error)
 263{
 264        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 265        r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
 266        int mirror;
 267        conf_t *conf = r1_bio->mddev->private;
 268
 269        mirror = r1_bio->read_disk;
 270        /*
 271         * this branch is our 'one mirror IO has finished' event handler:
 272         */
 273        update_head_pos(mirror, r1_bio);
 274
 275        if (uptodate)
 276                set_bit(R1BIO_Uptodate, &r1_bio->state);
 277        else {
 278                /* If all other devices have failed, we want to return
 279                 * the error upwards rather than fail the last device.
 280                 * Here we redefine "uptodate" to mean "Don't want to retry"
 281                 */
 282                unsigned long flags;
 283                spin_lock_irqsave(&conf->device_lock, flags);
 284                if (r1_bio->mddev->degraded == conf->raid_disks ||
 285                    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
 286                     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
 287                        uptodate = 1;
 288                spin_unlock_irqrestore(&conf->device_lock, flags);
 289        }
 290
 291        if (uptodate)
 292                raid_end_bio_io(r1_bio);
 293        else {
 294                /*
 295                 * oops, read error:
 296                 */
 297                char b[BDEVNAME_SIZE];
 298                if (printk_ratelimit())
 299                        printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
 300                               bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
 301                reschedule_retry(r1_bio);
 302        }
 303
 304        rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 305}
 306
 307static void raid1_end_write_request(struct bio *bio, int error)
 308{
 309        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 310        r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
 311        int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 312        conf_t *conf = r1_bio->mddev->private;
 313        struct bio *to_put = NULL;
 314
 315
 316        for (mirror = 0; mirror < conf->raid_disks; mirror++)
 317                if (r1_bio->bios[mirror] == bio)
 318                        break;
 319
 320        if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
 321                set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
 322                set_bit(R1BIO_BarrierRetry, &r1_bio->state);
 323                r1_bio->mddev->barriers_work = 0;
 324                /* Don't rdev_dec_pending in this branch - keep it for the retry */
 325        } else {
 326                /*
 327                 * this branch is our 'one mirror IO has finished' event handler:
 328                 */
 329                r1_bio->bios[mirror] = NULL;
 330                to_put = bio;
 331                if (!uptodate) {
 332                        md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
 333                        /* an I/O failed, we can't clear the bitmap */
 334                        set_bit(R1BIO_Degraded, &r1_bio->state);
 335                } else
 336                        /*
 337                         * Set R1BIO_Uptodate in our master bio, so that
  338                         * we will return a good error code to the higher
 339                         * levels even if IO on some other mirrored buffer fails.
 340                         *
 341                         * The 'master' represents the composite IO operation to
 342                         * user-side. So if something waits for IO, then it will
 343                         * wait for the 'master' bio.
 344                         */
 345                        set_bit(R1BIO_Uptodate, &r1_bio->state);
 346
 347                update_head_pos(mirror, r1_bio);
 348
 349                if (behind) {
 350                        if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
 351                                atomic_dec(&r1_bio->behind_remaining);
 352
 353                        /* In behind mode, we ACK the master bio once the I/O has safely
 354                         * reached all non-writemostly disks. Setting the Returned bit
 355                         * ensures that this gets done only once -- we don't ever want to
 356                         * return -EIO here, instead we'll wait */
 357
 358                        if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
 359                            test_bit(R1BIO_Uptodate, &r1_bio->state)) {
 360                                /* Maybe we can return now */
 361                                if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 362                                        struct bio *mbio = r1_bio->master_bio;
 363                                        PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
 364                                               (unsigned long long) mbio->bi_sector,
 365                                               (unsigned long long) mbio->bi_sector +
 366                                               (mbio->bi_size >> 9) - 1);
 367                                        bio_endio(mbio, 0);
 368                                }
 369                        }
 370                }
 371                rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 372        }
 373        /*
 374         *
 375         * Let's see if all mirrored write operations have finished
 376         * already.
 377         */
 378        if (atomic_dec_and_test(&r1_bio->remaining)) {
 379                if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
 380                        reschedule_retry(r1_bio);
 381                else {
 382                        /* it really is the end of this request */
 383                        if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 384                                /* free extra copy of the data pages */
 385                                int i = bio->bi_vcnt;
 386                                while (i--)
 387                                        safe_put_page(bio->bi_io_vec[i].bv_page);
 388                        }
 389                        /* clear the bitmap if all writes complete successfully */
 390                        bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
 391                                        r1_bio->sectors,
 392                                        !test_bit(R1BIO_Degraded, &r1_bio->state),
 393                                        behind);
 394                        md_write_end(r1_bio->mddev);
 395                        raid_end_bio_io(r1_bio);
 396                }
 397        }
 398
 399        if (to_put)
 400                bio_put(to_put);
 401}
 402
 403
 404/*
 405 * This routine returns the disk from which the requested read should
 406 * be done. There is a per-array 'next expected sequential IO' sector
 407 * number - if this matches on the next IO then we use the last disk.
  408 * There is also a per-disk 'last known head position' sector that is
  409 * maintained from IRQ contexts; both the normal and the resync IO
 410 * completion handlers update this position correctly. If there is no
 411 * perfect sequential match then we pick the disk whose head is closest.
 412 *
 413 * If there are 2 mirrors in the same 2 devices, performance degrades
 414 * because position is mirror, not device based.
 415 *
 416 * The rdev for the device selected will have nr_pending incremented.
 417 */
 418static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 419{
 420        const unsigned long this_sector = r1_bio->sector;
 421        int new_disk = conf->last_used, disk = new_disk;
 422        int wonly_disk = -1;
 423        const int sectors = r1_bio->sectors;
 424        sector_t new_distance, current_distance;
 425        mdk_rdev_t *rdev;
 426
 427        rcu_read_lock();
 428        /*
 429         * Check if we can balance. We can balance on the whole
 430         * device if no resync is going on, or below the resync window.
 431         * We take the first readable disk when above the resync window.
 432         */
 433 retry:
 434        if (conf->mddev->recovery_cp < MaxSector &&
 435            (this_sector + sectors >= conf->next_resync)) {
  436                /* Choose the first operational device, for consistency */
 437                new_disk = 0;
 438
 439                for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 440                     r1_bio->bios[new_disk] == IO_BLOCKED ||
 441                     !rdev || !test_bit(In_sync, &rdev->flags)
 442                             || test_bit(WriteMostly, &rdev->flags);
 443                     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
 444
 445                        if (rdev && test_bit(In_sync, &rdev->flags) &&
 446                                r1_bio->bios[new_disk] != IO_BLOCKED)
 447                                wonly_disk = new_disk;
 448
 449                        if (new_disk == conf->raid_disks - 1) {
 450                                new_disk = wonly_disk;
 451                                break;
 452                        }
 453                }
 454                goto rb_out;
 455        }
 456
 457
 458        /* make sure the disk is operational */
 459        for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 460             r1_bio->bios[new_disk] == IO_BLOCKED ||
 461             !rdev || !test_bit(In_sync, &rdev->flags) ||
 462                     test_bit(WriteMostly, &rdev->flags);
 463             rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
 464
 465                if (rdev && test_bit(In_sync, &rdev->flags) &&
 466                    r1_bio->bios[new_disk] != IO_BLOCKED)
 467                        wonly_disk = new_disk;
 468
 469                if (new_disk <= 0)
 470                        new_disk = conf->raid_disks;
 471                new_disk--;
 472                if (new_disk == disk) {
 473                        new_disk = wonly_disk;
 474                        break;
 475                }
 476        }
 477
 478        if (new_disk < 0)
 479                goto rb_out;
 480
 481        disk = new_disk;
 482        /* now disk == new_disk == starting point for search */
 483
 484        /*
 485         * Don't change to another disk for sequential reads:
 486         */
 487        if (conf->next_seq_sect == this_sector)
 488                goto rb_out;
 489        if (this_sector == conf->mirrors[new_disk].head_position)
 490                goto rb_out;
 491
 492        current_distance = abs(this_sector - conf->mirrors[disk].head_position);
 493
 494        /* Find the disk whose head is closest */
 495
 496        do {
 497                if (disk <= 0)
 498                        disk = conf->raid_disks;
 499                disk--;
 500
 501                rdev = rcu_dereference(conf->mirrors[disk].rdev);
 502
 503                if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
 504                    !test_bit(In_sync, &rdev->flags) ||
 505                    test_bit(WriteMostly, &rdev->flags))
 506                        continue;
 507
 508                if (!atomic_read(&rdev->nr_pending)) {
 509                        new_disk = disk;
 510                        break;
 511                }
 512                new_distance = abs(this_sector - conf->mirrors[disk].head_position);
 513                if (new_distance < current_distance) {
 514                        current_distance = new_distance;
 515                        new_disk = disk;
 516                }
 517        } while (disk != conf->last_used);
 518
 519 rb_out:
 520
 521
 522        if (new_disk >= 0) {
 523                rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 524                if (!rdev)
 525                        goto retry;
 526                atomic_inc(&rdev->nr_pending);
 527                if (!test_bit(In_sync, &rdev->flags)) {
 528                        /* cannot risk returning a device that failed
 529                         * before we inc'ed nr_pending
 530                         */
 531                        rdev_dec_pending(rdev, conf->mddev);
 532                        goto retry;
 533                }
 534                conf->next_seq_sect = this_sector + sectors;
 535                conf->last_used = new_disk;
 536        }
 537        rcu_read_unlock();
 538
 539        return new_disk;
 540}
 541
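     /*
      * Kick the request queue of every active member device so that any
      * plugged (batched) IO is dispatched to the drives.
      */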
 542static void unplug_slaves(mddev_t *mddev)
 543{
 544        conf_t *conf = mddev->private;
 545        int i;
 546
 547        rcu_read_lock();
 548        for (i=0; i<mddev->raid_disks; i++) {
 549                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 550                if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
 551                        struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 552
 553                        atomic_inc(&rdev->nr_pending);
 554                        rcu_read_unlock();
 555
 556                        blk_unplug(r_queue);
 557
 558                        rdev_dec_pending(rdev, mddev);
 559                        rcu_read_lock();
 560                }
 561        }
 562        rcu_read_unlock();
 563}
 564
 565static void raid1_unplug(struct request_queue *q)
 566{
 567        mddev_t *mddev = q->queuedata;
 568
 569        unplug_slaves(mddev);
 570        md_wakeup_thread(mddev->thread);
 571}
 572
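     /*
      * Congestion callback: report the array as congested if any active
      * member device's backing queue is congested (see the '|| 1' note
      * below).
      */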
 573static int raid1_congested(void *data, int bits)
 574{
 575        mddev_t *mddev = data;
 576        conf_t *conf = mddev->private;
 577        int i, ret = 0;
 578
 579        if (mddev_congested(mddev, bits))
 580                return 1;
 581
 582        rcu_read_lock();
 583        for (i = 0; i < mddev->raid_disks; i++) {
 584                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 585                if (rdev && !test_bit(Faulty, &rdev->flags)) {
 586                        struct request_queue *q = bdev_get_queue(rdev->bdev);
 587
 588                        /* Note the '|| 1' - when read_balance prefers
 589                         * non-congested targets, it can be removed
 590                         */
 591                        if ((bits & (1<<BDI_async_congested)) || 1)
 592                                ret |= bdi_congested(&q->backing_dev_info, bits);
 593                        else
 594                                ret &= bdi_congested(&q->backing_dev_info, bits);
 595                }
 596        }
 597        rcu_read_unlock();
 598        return ret;
 599}
 600
 601
 602static int flush_pending_writes(conf_t *conf)
 603{
 604        /* Any writes that have been queued but are awaiting
 605         * bitmap updates get flushed here.
 606         * We return 1 if any requests were actually submitted.
 607         */
 608        int rv = 0;
 609
 610        spin_lock_irq(&conf->device_lock);
 611
 612        if (conf->pending_bio_list.head) {
 613                struct bio *bio;
 614                bio = bio_list_get(&conf->pending_bio_list);
 615                blk_remove_plug(conf->mddev->queue);
 616                spin_unlock_irq(&conf->device_lock);
 617                /* flush any pending bitmap writes to
 618                 * disk before proceeding w/ I/O */
 619                bitmap_unplug(conf->mddev->bitmap);
 620
 621                while (bio) { /* submit pending writes */
 622                        struct bio *next = bio->bi_next;
 623                        bio->bi_next = NULL;
 624                        generic_make_request(bio);
 625                        bio = next;
 626                }
 627                rv = 1;
 628        } else
 629                spin_unlock_irq(&conf->device_lock);
 630        return rv;
 631}
 632
 633/* Barriers....
 634 * Sometimes we need to suspend IO while we do something else,
 635 * either some resync/recovery, or reconfigure the array.
 636 * To do this we raise a 'barrier'.
 637 * The 'barrier' is a counter that can be raised multiple times
 638 * to count how many activities are happening which preclude
 639 * normal IO.
 640 * We can only raise the barrier if there is no pending IO.
 641 * i.e. if nr_pending == 0.
 642 * We choose only to raise the barrier if no-one is waiting for the
 643 * barrier to go down.  This means that as soon as an IO request
 644 * is ready, no other operations which require a barrier will start
 645 * until the IO request has had a chance.
 646 *
 647 * So: regular IO calls 'wait_barrier'.  When that returns there
  648 *    is no background IO happening.  It must arrange to call
  649 *    allow_barrier when it has finished its IO.
  650 * background IO calls must call raise_barrier.  Once that returns
  651 *    there is no normal IO happening.  It must arrange to call
 652 *    lower_barrier when the particular background IO completes.
 653 */
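     /*
      * Illustrative pairing (a sketch, not a call sequence from this file):
      *
      *   regular IO:      wait_barrier(conf);  ... do IO ...;   allow_barrier(conf);
      *   resync/recovery: raise_barrier(conf); ... sync IO ...; lower_barrier(conf);
      */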
 654#define RESYNC_DEPTH 32
 655
 656static void raise_barrier(conf_t *conf)
 657{
 658        spin_lock_irq(&conf->resync_lock);
 659
 660        /* Wait until no block IO is waiting */
 661        wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
 662                            conf->resync_lock,
 663                            raid1_unplug(conf->mddev->queue));
 664
 665        /* block any new IO from starting */
 666        conf->barrier++;
 667
  668        /* Now wait for all pending IO to complete */
 669        wait_event_lock_irq(conf->wait_barrier,
 670                            !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
 671                            conf->resync_lock,
 672                            raid1_unplug(conf->mddev->queue));
 673
 674        spin_unlock_irq(&conf->resync_lock);
 675}
 676
 677static void lower_barrier(conf_t *conf)
 678{
 679        unsigned long flags;
 680        spin_lock_irqsave(&conf->resync_lock, flags);
 681        conf->barrier--;
 682        spin_unlock_irqrestore(&conf->resync_lock, flags);
 683        wake_up(&conf->wait_barrier);
 684}
 685
 686static void wait_barrier(conf_t *conf)
 687{
 688        spin_lock_irq(&conf->resync_lock);
 689        if (conf->barrier) {
 690                conf->nr_waiting++;
 691                wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 692                                    conf->resync_lock,
 693                                    raid1_unplug(conf->mddev->queue));
 694                conf->nr_waiting--;
 695        }
 696        conf->nr_pending++;
 697        spin_unlock_irq(&conf->resync_lock);
 698}
 699
 700static void allow_barrier(conf_t *conf)
 701{
 702        unsigned long flags;
 703        spin_lock_irqsave(&conf->resync_lock, flags);
 704        conf->nr_pending--;
 705        spin_unlock_irqrestore(&conf->resync_lock, flags);
 706        wake_up(&conf->wait_barrier);
 707}
 708
 709static void freeze_array(conf_t *conf)
 710{
 711        /* stop syncio and normal IO and wait for everything to
  712         * go quiet.
  713         * We increment barrier and nr_waiting, and then
  714         * wait until nr_pending matches nr_queued+1
 715         * This is called in the context of one normal IO request
 716         * that has failed. Thus any sync request that might be pending
 717         * will be blocked by nr_pending, and we need to wait for
 718         * pending IO requests to complete or be queued for re-try.
 719         * Thus the number queued (nr_queued) plus this request (1)
 720         * must match the number of pending IOs (nr_pending) before
 721         * we continue.
 722         */
 723        spin_lock_irq(&conf->resync_lock);
 724        conf->barrier++;
 725        conf->nr_waiting++;
 726        wait_event_lock_irq(conf->wait_barrier,
 727                            conf->nr_pending == conf->nr_queued+1,
 728                            conf->resync_lock,
 729                            ({ flush_pending_writes(conf);
 730                               raid1_unplug(conf->mddev->queue); }));
 731        spin_unlock_irq(&conf->resync_lock);
 732}
 733static void unfreeze_array(conf_t *conf)
 734{
 735        /* reverse the effect of the freeze */
 736        spin_lock_irq(&conf->resync_lock);
 737        conf->barrier--;
 738        conf->nr_waiting--;
 739        wake_up(&conf->wait_barrier);
 740        spin_unlock_irq(&conf->resync_lock);
 741}
 742
 743
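     /*
      * Write-behind: when the bitmap allows it (max_write_behind), writes to
      * WriteMostly devices may be acknowledged to the caller before reaching
      * those devices, so the data is first copied into private pages here
      * and the original bio pages can be released early.
      */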
 744/* duplicate the data pages for behind I/O */
 745static struct page **alloc_behind_pages(struct bio *bio)
 746{
 747        int i;
 748        struct bio_vec *bvec;
 749        struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
 750                                        GFP_NOIO);
 751        if (unlikely(!pages))
 752                goto do_sync_io;
 753
 754        bio_for_each_segment(bvec, bio, i) {
 755                pages[i] = alloc_page(GFP_NOIO);
 756                if (unlikely(!pages[i]))
 757                        goto do_sync_io;
 758                memcpy(kmap(pages[i]) + bvec->bv_offset,
 759                        kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
 760                kunmap(pages[i]);
 761                kunmap(bvec->bv_page);
 762        }
 763
 764        return pages;
 765
 766do_sync_io:
 767        if (pages)
 768                for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
 769                        put_page(pages[i]);
 770        kfree(pages);
 771        PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 772        return NULL;
 773}
 774
 775static int make_request(struct request_queue *q, struct bio * bio)
 776{
 777        mddev_t *mddev = q->queuedata;
 778        conf_t *conf = mddev->private;
 779        mirror_info_t *mirror;
 780        r1bio_t *r1_bio;
 781        struct bio *read_bio;
 782        int i, targets = 0, disks;
 783        struct bitmap *bitmap;
 784        unsigned long flags;
 785        struct bio_list bl;
 786        struct page **behind_pages = NULL;
 787        const int rw = bio_data_dir(bio);
 788        const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
 789        int cpu;
 790        bool do_barriers;
 791        mdk_rdev_t *blocked_rdev;
 792
 793        /*
 794         * Register the new request and wait if the reconstruction
 795         * thread has put up a bar for new requests.
 796         * Continue immediately if no resync is active currently.
 797         * We test barriers_work *after* md_write_start as md_write_start
 798         * may cause the first superblock write, and that will check out
 799         * if barriers work.
 800         */
 801
 802        md_write_start(mddev, bio); /* wait on superblock update early */
 803
 804        if (unlikely(!mddev->barriers_work &&
 805                     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 806                if (rw == WRITE)
 807                        md_write_end(mddev);
 808                bio_endio(bio, -EOPNOTSUPP);
 809                return 0;
 810        }
 811
 812        wait_barrier(conf);
 813
 814        bitmap = mddev->bitmap;
 815
 816        cpu = part_stat_lock();
 817        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
 818        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
 819                      bio_sectors(bio));
 820        part_stat_unlock();
 821
 822        /*
 823         * make_request() can abort the operation when READA is being
 824         * used and no empty request is available.
 825         *
 826         */
 827        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 828
 829        r1_bio->master_bio = bio;
 830        r1_bio->sectors = bio->bi_size >> 9;
 831        r1_bio->state = 0;
 832        r1_bio->mddev = mddev;
 833        r1_bio->sector = bio->bi_sector;
 834
 835        if (rw == READ) {
 836                /*
 837                 * read balancing logic:
 838                 */
 839                int rdisk = read_balance(conf, r1_bio);
 840
 841                if (rdisk < 0) {
 842                        /* couldn't find anywhere to read from */
 843                        raid_end_bio_io(r1_bio);
 844                        return 0;
 845                }
 846                mirror = conf->mirrors + rdisk;
 847
 848                r1_bio->read_disk = rdisk;
 849
 850                read_bio = bio_clone(bio, GFP_NOIO);
 851
 852                r1_bio->bios[rdisk] = read_bio;
 853
 854                read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 855                read_bio->bi_bdev = mirror->rdev->bdev;
 856                read_bio->bi_end_io = raid1_end_read_request;
 857                read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
 858                read_bio->bi_private = r1_bio;
 859
 860                generic_make_request(read_bio);
 861                return 0;
 862        }
 863
 864        /*
 865         * WRITE:
 866         */
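             /* Write path: pick targets under rcu and pin their rdevs,
              * back off and retry if one is Blocked, optionally set up
              * write-behind pages, clone one bio per target and queue the
              * clones on conf->pending_bio_list; flush_pending_writes()
              * submits them once the bitmap updates are on disk.
              */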
 867        /* first select target devices under spinlock and
 868         * inc refcount on their rdev.  Record them by setting
 869         * bios[x] to bio
 870         */
 871        disks = conf->raid_disks;
 872#if 0
 873        { static int first=1;
 874        if (first) printk("First Write sector %llu disks %d\n",
 875                          (unsigned long long)r1_bio->sector, disks);
 876        first = 0;
 877        }
 878#endif
 879 retry_write:
 880        blocked_rdev = NULL;
 881        rcu_read_lock();
 882        for (i = 0;  i < disks; i++) {
 883                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 884                if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
 885                        atomic_inc(&rdev->nr_pending);
 886                        blocked_rdev = rdev;
 887                        break;
 888                }
 889                if (rdev && !test_bit(Faulty, &rdev->flags)) {
 890                        atomic_inc(&rdev->nr_pending);
 891                        if (test_bit(Faulty, &rdev->flags)) {
 892                                rdev_dec_pending(rdev, mddev);
 893                                r1_bio->bios[i] = NULL;
 894                        } else
 895                                r1_bio->bios[i] = bio;
 896                        targets++;
 897                } else
 898                        r1_bio->bios[i] = NULL;
 899        }
 900        rcu_read_unlock();
 901
 902        if (unlikely(blocked_rdev)) {
 903                /* Wait for this device to become unblocked */
 904                int j;
 905
 906                for (j = 0; j < i; j++)
 907                        if (r1_bio->bios[j])
 908                                rdev_dec_pending(conf->mirrors[j].rdev, mddev);
 909
 910                allow_barrier(conf);
 911                md_wait_for_blocked_rdev(blocked_rdev, mddev);
 912                wait_barrier(conf);
 913                goto retry_write;
 914        }
 915
 916        BUG_ON(targets == 0); /* we never fail the last device */
 917
 918        if (targets < conf->raid_disks) {
 919                /* array is degraded, we will not clear the bitmap
 920                 * on I/O completion (see raid1_end_write_request) */
 921                set_bit(R1BIO_Degraded, &r1_bio->state);
 922        }
 923
 924        /* do behind I/O ? */
 925        if (bitmap &&
 926            atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
 927            (behind_pages = alloc_behind_pages(bio)) != NULL)
 928                set_bit(R1BIO_BehindIO, &r1_bio->state);
 929
 930        atomic_set(&r1_bio->remaining, 0);
 931        atomic_set(&r1_bio->behind_remaining, 0);
 932
 933        do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
 934        if (do_barriers)
 935                set_bit(R1BIO_Barrier, &r1_bio->state);
 936
 937        bio_list_init(&bl);
 938        for (i = 0; i < disks; i++) {
 939                struct bio *mbio;
 940                if (!r1_bio->bios[i])
 941                        continue;
 942
 943                mbio = bio_clone(bio, GFP_NOIO);
 944                r1_bio->bios[i] = mbio;
 945
 946                mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 947                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 948                mbio->bi_end_io = raid1_end_write_request;
 949                mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
 950                        (do_sync << BIO_RW_SYNCIO);
 951                mbio->bi_private = r1_bio;
 952
 953                if (behind_pages) {
 954                        struct bio_vec *bvec;
 955                        int j;
 956
 957                        /* Yes, I really want the '__' version so that
 958                         * we clear any unused pointer in the io_vec, rather
 959                         * than leave them unchanged.  This is important
 960                         * because when we come to free the pages, we won't
  961                         * know the original bi_idx, so we just free
 962                         * them all
 963                         */
 964                        __bio_for_each_segment(bvec, mbio, j, 0)
 965                                bvec->bv_page = behind_pages[j];
 966                        if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
 967                                atomic_inc(&r1_bio->behind_remaining);
 968                }
 969
 970                atomic_inc(&r1_bio->remaining);
 971
 972                bio_list_add(&bl, mbio);
 973        }
 974        kfree(behind_pages); /* the behind pages are attached to the bios now */
 975
 976        bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
 977                                test_bit(R1BIO_BehindIO, &r1_bio->state));
 978        spin_lock_irqsave(&conf->device_lock, flags);
 979        bio_list_merge(&conf->pending_bio_list, &bl);
 980        bio_list_init(&bl);
 981
 982        blk_plug_device(mddev->queue);
 983        spin_unlock_irqrestore(&conf->device_lock, flags);
 984
 985        /* In case raid1d snuck into freeze_array */
 986        wake_up(&conf->wait_barrier);
 987
 988        if (do_sync)
 989                md_wakeup_thread(mddev->thread);
 990#if 0
 991        while ((bio = bio_list_pop(&bl)) != NULL)
 992                generic_make_request(bio);
 993#endif
 994
 995        return 0;
 996}
 997
 998static void status(struct seq_file *seq, mddev_t *mddev)
 999{
1000        conf_t *conf = mddev->private;
1001        int i;
1002
1003        seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1004                   conf->raid_disks - mddev->degraded);
1005        rcu_read_lock();
1006        for (i = 0; i < conf->raid_disks; i++) {
1007                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
1008                seq_printf(seq, "%s",
1009                           rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1010        }
1011        rcu_read_unlock();
1012        seq_printf(seq, "]");
1013}
1014
1015
1016static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1017{
1018        char b[BDEVNAME_SIZE];
1019        conf_t *conf = mddev->private;
1020
1021        /*
1022         * If it is not operational, then we have already marked it as dead
 1023         * else if it is the last working disk, ignore the error, let the
1024         * next level up know.
1025         * else mark the drive as failed
1026         */
1027        if (test_bit(In_sync, &rdev->flags)
1028            && (conf->raid_disks - mddev->degraded) == 1) {
1029                /*
1030                 * Don't fail the drive, act as though we were just a
1031                 * normal single drive.
1032                 * However don't try a recovery from this drive as
1033                 * it is very likely to fail.
1034                 */
1035                mddev->recovery_disabled = 1;
1036                return;
1037        }
1038        if (test_and_clear_bit(In_sync, &rdev->flags)) {
1039                unsigned long flags;
1040                spin_lock_irqsave(&conf->device_lock, flags);
1041                mddev->degraded++;
1042                set_bit(Faulty, &rdev->flags);
1043                spin_unlock_irqrestore(&conf->device_lock, flags);
1044                /*
1045                 * if recovery is running, make sure it aborts.
1046                 */
1047                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1048        } else
1049                set_bit(Faulty, &rdev->flags);
1050        set_bit(MD_CHANGE_DEVS, &mddev->flags);
1051        printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
1052                "raid1: Operation continuing on %d devices.\n",
1053                bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
1054}
1055
1056static void print_conf(conf_t *conf)
1057{
1058        int i;
1059
1060        printk("RAID1 conf printout:\n");
1061        if (!conf) {
1062                printk("(!conf)\n");
1063                return;
1064        }
1065        printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1066                conf->raid_disks);
1067
1068        rcu_read_lock();
1069        for (i = 0; i < conf->raid_disks; i++) {
1070                char b[BDEVNAME_SIZE];
1071                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
1072                if (rdev)
1073                        printk(" disk %d, wo:%d, o:%d, dev:%s\n",
1074                               i, !test_bit(In_sync, &rdev->flags),
1075                               !test_bit(Faulty, &rdev->flags),
1076                               bdevname(rdev->bdev,b));
1077        }
1078        rcu_read_unlock();
1079}
1080
1081static void close_sync(conf_t *conf)
1082{
1083        wait_barrier(conf);
1084        allow_barrier(conf);
1085
1086        mempool_destroy(conf->r1buf_pool);
1087        conf->r1buf_pool = NULL;
1088}
1089
1090static int raid1_spare_active(mddev_t *mddev)
1091{
1092        int i;
1093        conf_t *conf = mddev->private;
1094
1095        /*
 1096         * Find all newly recovered disks within the RAID1 configuration
 1097         * and mark them in-sync.
1098         * Called under mddev lock, so rcu protection not needed.
1099         */
1100        for (i = 0; i < conf->raid_disks; i++) {
1101                mdk_rdev_t *rdev = conf->mirrors[i].rdev;
1102                if (rdev
1103                    && !test_bit(Faulty, &rdev->flags)
1104                    && !test_and_set_bit(In_sync, &rdev->flags)) {
1105                        unsigned long flags;
1106                        spin_lock_irqsave(&conf->device_lock, flags);
1107                        mddev->degraded--;
1108                        spin_unlock_irqrestore(&conf->device_lock, flags);
1109                }
1110        }
1111
1112        print_conf(conf);
1113        return 0;
1114}
1115
1116
1117static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1118{
1119        conf_t *conf = mddev->private;
1120        int err = -EEXIST;
1121        int mirror = 0;
1122        mirror_info_t *p;
1123        int first = 0;
1124        int last = mddev->raid_disks - 1;
1125
1126        if (rdev->raid_disk >= 0)
1127                first = last = rdev->raid_disk;
1128
1129        for (mirror = first; mirror <= last; mirror++)
1130                if ( !(p=conf->mirrors+mirror)->rdev) {
1131
1132                        disk_stack_limits(mddev->gendisk, rdev->bdev,
1133                                          rdev->data_offset << 9);
1134                        /* as we don't honour merge_bvec_fn, we must never risk
1135                         * violating it, so limit ->max_sector to one PAGE, as
1136                         * a one page request is never in violation.
1137                         */
1138                        if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1139                            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
1140                                blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
1141
1142                        p->head_position = 0;
1143                        rdev->raid_disk = mirror;
1144                        err = 0;
 1145                        /* As all devices are equivalent, we don't need a full recovery
 1146                         * if this disk was recently part of the array
1147                         */
1148                        if (rdev->saved_raid_disk < 0)
1149                                conf->fullsync = 1;
1150                        rcu_assign_pointer(p->rdev, rdev);
1151                        break;
1152                }
1153        md_integrity_add_rdev(rdev, mddev);
1154        print_conf(conf);
1155        return err;
1156}
1157
1158static int raid1_remove_disk(mddev_t *mddev, int number)
1159{
1160        conf_t *conf = mddev->private;
1161        int err = 0;
1162        mdk_rdev_t *rdev;
1163        mirror_info_t *p = conf->mirrors+ number;
1164
1165        print_conf(conf);
1166        rdev = p->rdev;
1167        if (rdev) {
1168                if (test_bit(In_sync, &rdev->flags) ||
1169                    atomic_read(&rdev->nr_pending)) {
1170                        err = -EBUSY;
1171                        goto abort;
1172                }
 1173                /* Only remove non-faulty devices if recovery
 1174                 * is not possible.
1175                 */
1176                if (!test_bit(Faulty, &rdev->flags) &&
1177                    mddev->degraded < conf->raid_disks) {
1178                        err = -EBUSY;
1179                        goto abort;
1180                }
1181                p->rdev = NULL;
1182                synchronize_rcu();
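                     /* After the grace period no new rcu-protected references
                      * to this rdev can exist; a non-zero nr_pending now means
                      * a request raced in before p->rdev was cleared.
                      */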
1183                if (atomic_read(&rdev->nr_pending)) {
1184                        /* lost the race, try later */
1185                        err = -EBUSY;
1186                        p->rdev = rdev;
1187                        goto abort;
1188                }
1189                md_integrity_register(mddev);
1190        }
1191abort:
1192
1193        print_conf(conf);
1194        return err;
1195}
1196
1197
1198static void end_sync_read(struct bio *bio, int error)
1199{
1200        r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
1201        int i;
1202
1203        for (i=r1_bio->mddev->raid_disks; i--; )
1204                if (r1_bio->bios[i] == bio)
1205                        break;
1206        BUG_ON(i < 0);
1207        update_head_pos(i, r1_bio);
1208        /*
1209         * we have read a block, now it needs to be re-written,
1210         * or re-read if the read failed.
1211         * We don't do much here, just schedule handling by raid1d
1212         */
1213        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1214                set_bit(R1BIO_Uptodate, &r1_bio->state);
1215
1216        if (atomic_dec_and_test(&r1_bio->remaining))
1217                reschedule_retry(r1_bio);
1218}
1219
1220static void end_sync_write(struct bio *bio, int error)
1221{
1222        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1223        r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
1224        mddev_t *mddev = r1_bio->mddev;
1225        conf_t *conf = mddev->private;
1226        int i;
1227        int mirror=0;
1228
1229        for (i = 0; i < conf->raid_disks; i++)
1230                if (r1_bio->bios[i] == bio) {
1231                        mirror = i;
1232                        break;
1233                }
1234        if (!uptodate) {
1235                int sync_blocks = 0;
1236                sector_t s = r1_bio->sector;
1237                long sectors_to_go = r1_bio->sectors;
 1238                /* make sure these bits don't get cleared. */
1239                do {
1240                        bitmap_end_sync(mddev->bitmap, s,
1241                                        &sync_blocks, 1);
1242                        s += sync_blocks;
1243                        sectors_to_go -= sync_blocks;
1244                } while (sectors_to_go > 0);
1245                md_error(mddev, conf->mirrors[mirror].rdev);
1246        }
1247
1248        update_head_pos(mirror, r1_bio);
1249
1250        if (atomic_dec_and_test(&r1_bio->remaining)) {
1251                sector_t s = r1_bio->sectors;
1252                put_buf(r1_bio);
1253                md_done_sync(mddev, s, uptodate);
1254        }
1255}
1256
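     /*
      * Handle a resync/check r1bio once its reads have completed.  For a
      * user-requested check/repair the copies are compared and differing or
      * unreadable copies are rewritten; read errors are repaired by reading
      * the block from another mirror and writing it back.
      */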
1257static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1258{
1259        conf_t *conf = mddev->private;
1260        int i;
1261        int disks = conf->raid_disks;
1262        struct bio *bio, *wbio;
1263
1264        bio = r1_bio->bios[r1_bio->read_disk];
1265
1266
1267        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1268                /* We have read all readable devices.  If we haven't
1269                 * got the block, then there is no hope left.
1270                 * If we have, then we want to do a comparison
1271                 * and skip the write if everything is the same.
1272                 * If any blocks failed to read, then we need to
1273                 * attempt an over-write
1274                 */
1275                int primary;
1276                if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
1277                        for (i=0; i<mddev->raid_disks; i++)
1278                                if (r1_bio->bios[i]->bi_end_io == end_sync_read)
1279                                        md_error(mddev, conf->mirrors[i].rdev);
1280
1281                        md_done_sync(mddev, r1_bio->sectors, 1);
1282                        put_buf(r1_bio);
1283                        return;
1284                }
1285                for (primary=0; primary<mddev->raid_disks; primary++)
1286                        if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1287                            test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1288                                r1_bio->bios[primary]->bi_end_io = NULL;
1289                                rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1290                                break;
1291                        }
1292                r1_bio->read_disk = primary;
1293                for (i=0; i<mddev->raid_disks; i++)
1294                        if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
1295                                int j;
1296                                int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
1297                                struct bio *pbio = r1_bio->bios[primary];
1298                                struct bio *sbio = r1_bio->bios[i];
1299
1300                                if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
1301                                        for (j = vcnt; j-- ; ) {
1302                                                struct page *p, *s;
1303                                                p = pbio->bi_io_vec[j].bv_page;
1304                                                s = sbio->bi_io_vec[j].bv_page;
1305                                                if (memcmp(page_address(p),
1306                                                           page_address(s),
1307                                                           PAGE_SIZE))
1308                                                        break;
1309                                        }
1310                                } else
1311                                        j = 0;
1312                                if (j >= 0)
1313                                        mddev->resync_mismatches += r1_bio->sectors;
1314                                if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
1315                                              && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
1316                                        sbio->bi_end_io = NULL;
1317                                        rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1318                                } else {
1319                                        /* fixup the bio for reuse */
1320                                        int size;
1321                                        sbio->bi_vcnt = vcnt;
1322                                        sbio->bi_size = r1_bio->sectors << 9;
1323                                        sbio->bi_idx = 0;
1324                                        sbio->bi_phys_segments = 0;
1325                                        sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1326                                        sbio->bi_flags |= 1 << BIO_UPTODATE;
1327                                        sbio->bi_next = NULL;
1328                                        sbio->bi_sector = r1_bio->sector +
1329                                                conf->mirrors[i].rdev->data_offset;
1330                                        sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1331                                        size = sbio->bi_size;
1332                                        for (j = 0; j < vcnt ; j++) {
1333                                                struct bio_vec *bi;
1334                                                bi = &sbio->bi_io_vec[j];
1335                                                bi->bv_offset = 0;
1336                                                if (size > PAGE_SIZE)
1337                                                        bi->bv_len = PAGE_SIZE;
1338                                                else
1339                                                        bi->bv_len = size;
1340                                                size -= PAGE_SIZE;
1341                                                memcpy(page_address(bi->bv_page),
1342                                                       page_address(pbio->bi_io_vec[j].bv_page),
1343                                                       PAGE_SIZE);
1344                                        }
1345
1346                                }
1347                        }
1348        }
1349        if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
1350                /* ouch - failed to read all of that.
1351                 * Try some synchronous reads of other devices to get
1352                 * good data, much like with normal read errors.  Only
1353                 * read into the pages we already have so we don't
1354                 * need to re-issue the read request.
1355                 * We don't need to freeze the array, because being in an
1356                 * active sync request, there is no normal IO, and
1357                 * no overlapping syncs.
1358                 */
1359                sector_t sect = r1_bio->sector;
1360                int sectors = r1_bio->sectors;
1361                int idx = 0;
1362
1363                while(sectors) {
1364                        int s = sectors;
1365                        int d = r1_bio->read_disk;
1366                        int success = 0;
1367                        mdk_rdev_t *rdev;
1368
1369                        if (s > (PAGE_SIZE>>9))
1370                                s = PAGE_SIZE >> 9;
1371                        do {
1372                                if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
 1373                                        /* No rcu protection needed here; devices
1374                                         * can only be removed when no resync is
1375                                         * active, and resync is currently active
1376                                         */
1377                                        rdev = conf->mirrors[d].rdev;
1378                                        if (sync_page_io(rdev->bdev,
1379                                                         sect + rdev->data_offset,
1380                                                         s<<9,
1381                                                         bio->bi_io_vec[idx].bv_page,
1382                                                         READ)) {
1383                                                success = 1;
1384                                                break;
1385                                        }
1386                                }
1387                                d++;
1388                                if (d == conf->raid_disks)
1389                                        d = 0;
1390                        } while (!success && d != r1_bio->read_disk);
1391
1392                        if (success) {
1393                                int start = d;
1394                                /* write it back and re-read */
1395                                set_bit(R1BIO_Uptodate, &r1_bio->state);
1396                                while (d != r1_bio->read_disk) {
1397                                        if (d == 0)
1398                                                d = conf->raid_disks;
1399                                        d--;
1400                                        if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1401                                                continue;
1402                                        rdev = conf->mirrors[d].rdev;
1403                                        atomic_add(s, &rdev->corrected_errors);
1404                                        if (sync_page_io(rdev->bdev,
1405                                                         sect + rdev->data_offset,
1406                                                         s<<9,
1407                                                         bio->bi_io_vec[idx].bv_page,
1408                                                         WRITE) == 0)
1409                                                md_error(mddev, rdev);
1410                                }
1411                                d = start;
1412                                while (d != r1_bio->read_disk) {
1413                                        if (d == 0)
1414                                                d = conf->raid_disks;
1415                                        d--;
1416                                        if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1417                                                continue;
1418                                        rdev = conf->mirrors[d].rdev;
1419                                        if (sync_page_io(rdev->bdev,
1420                                                         sect + rdev->data_offset,
1421                                                         s<<9,
1422                                                         bio->bi_io_vec[idx].bv_page,
1423                                                         READ) == 0)
1424                                                md_error(mddev, rdev);
1425                                }
1426                        } else {
1427                                char b[BDEVNAME_SIZE];
1428                                /* Cannot read from anywhere, array is toast */
1429                                md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
1430                                printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
1431                                       " for block %llu\n",
1432                                       bdevname(bio->bi_bdev,b),
1433                                       (unsigned long long)r1_bio->sector);
1434                                md_done_sync(mddev, r1_bio->sectors, 0);
1435                                put_buf(r1_bio);
1436                                return;
1437                        }
1438                        sectors -= s;
1439                        sect += s;
1440                        idx ++;
1441                }
1442        }
1443
1444        /*
1445         * schedule writes
1446         */
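            /*
             * Roughly: skip slots that were never set up (bi_end_io == NULL)
             * and pure read targets.  During a resync (MD_RECOVERY_SYNC) the
             * extra read targets (everything except the chosen read_disk)
             * are rewritten as well, so corrected data reaches every mirror;
             * during a recovery only the out-of-sync devices
             * (end_sync_write) are written.
             */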
1447        atomic_set(&r1_bio->remaining, 1);
1448        for (i = 0; i < disks ; i++) {
1449                wbio = r1_bio->bios[i];
1450                if (wbio->bi_end_io == NULL ||
1451                    (wbio->bi_end_io == end_sync_read &&
1452                     (i == r1_bio->read_disk ||
1453                      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1454                        continue;
1455
1456                wbio->bi_rw = WRITE;
1457                wbio->bi_end_io = end_sync_write;
1458                atomic_inc(&r1_bio->remaining);
1459                md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
1460
1461                generic_make_request(wbio);
1462        }
1463
1464        if (atomic_dec_and_test(&r1_bio->remaining)) {
1465                /* if we're here, all write(s) have completed, so clean up */
1466                md_done_sync(mddev, r1_bio->sectors, 1);
1467                put_buf(r1_bio);
1468        }
1469}
1470
1471/*
1472 * raid1d (below) is a kernel thread which:
1473 *
1474 *      1.      Retries failed read operations on working mirrors.
1475 *      2.      Updates the raid superblock when problems encounter.
1476 *      3.      Performs writes following reads for array syncronising.
1477 */
1478
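    /*
     * Try to repair a read error in place: re-read the failing sectors, one
     * page at a time, from each in-sync mirror in turn (into conf->tmppage)
     * until one succeeds, then write the good data back to the mirrors tried
     * before it (including the one that originally failed) and re-read to
     * verify, failing any device whose write-back or re-read fails.  If no
     * mirror can supply the data, the original read_disk is failed via
     * md_error().
     */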
1479static void fix_read_error(conf_t *conf, int read_disk,
1480                           sector_t sect, int sectors)
1481{
1482        mddev_t *mddev = conf->mddev;
1483        while(sectors) {
1484                int s = sectors;
1485                int d = read_disk;
1486                int success = 0;
1487                int start;
1488                mdk_rdev_t *rdev;
1489
1490                if (s > (PAGE_SIZE>>9))
1491                        s = PAGE_SIZE >> 9;
1492
1493                do {
1494                        /* Note: no rcu protection needed here
1495                         * as this is synchronous in the raid1d thread
1496                         * which is the thread that might remove
1497                         * a device.  If raid1d ever becomes multi-threaded....
1498                         */
1499                        rdev = conf->mirrors[d].rdev;
1500                        if (rdev &&
1501                            test_bit(In_sync, &rdev->flags) &&
1502                            sync_page_io(rdev->bdev,
1503                                         sect + rdev->data_offset,
1504                                         s<<9,
1505                                         conf->tmppage, READ))
1506                                success = 1;
1507                        else {
1508                                d++;
1509                                if (d == conf->raid_disks)
1510                                        d = 0;
1511                        }
1512                } while (!success && d != read_disk);
1513
1514                if (!success) {
1515                        /* Cannot read from anywhere -- bye bye array */
1516                        md_error(mddev, conf->mirrors[read_disk].rdev);
1517                        break;
1518                }
1519                /* write it back and re-read */
1520                start = d;
1521                while (d != read_disk) {
1522                        if (d==0)
1523                                d = conf->raid_disks;
1524                        d--;
1525                        rdev = conf->mirrors[d].rdev;
1526                        if (rdev &&
1527                            test_bit(In_sync, &rdev->flags)) {
1528                                if (sync_page_io(rdev->bdev,
1529                                                 sect + rdev->data_offset,
1530                                                 s<<9, conf->tmppage, WRITE)
1531                                    == 0)
1532                                        /* Well, this device is dead */
1533                                        md_error(mddev, rdev);
1534                        }
1535                }
1536                d = start;
1537                while (d != read_disk) {
1538                        char b[BDEVNAME_SIZE];
1539                        if (d==0)
1540                                d = conf->raid_disks;
1541                        d--;
1542                        rdev = conf->mirrors[d].rdev;
1543                        if (rdev &&
1544                            test_bit(In_sync, &rdev->flags)) {
1545                                if (sync_page_io(rdev->bdev,
1546                                                 sect + rdev->data_offset,
1547                                                 s<<9, conf->tmppage, READ)
1548                                    == 0)
1549                                        /* Well, this device is dead */
1550                                        md_error(mddev, rdev);
1551                                else {
1552                                        atomic_add(s, &rdev->corrected_errors);
1553                                        printk(KERN_INFO
1554                                               "raid1:%s: read error corrected "
1555                                               "(%d sectors at %llu on %s)\n",
1556                                               mdname(mddev), s,
1557                                               (unsigned long long)(sect +
1558                                                   rdev->data_offset),
1559                                               bdevname(rdev->bdev, b));
1560                                }
1561                        }
1562                }
1563                sectors -= s;
1564                sect += s;
1565        }
1566}
1567
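    /*
     * The main service loop: flush any writes queued by make_request(), then
     * pop r1bios off conf->retry_list and handle each according to its
     * state: finish a resync write, resubmit a failed barrier write without
     * the barrier, or recover from a read error and redirect the read to
     * another mirror.
     */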
1568static void raid1d(mddev_t *mddev)
1569{
1570        r1bio_t *r1_bio;
1571        struct bio *bio;
1572        unsigned long flags;
1573        conf_t *conf = mddev->private;
1574        struct list_head *head = &conf->retry_list;
1575        int unplug=0;
1576        mdk_rdev_t *rdev;
1577
1578        md_check_recovery(mddev);
1579        
1580        for (;;) {
1581                char b[BDEVNAME_SIZE];
1582
1583                unplug += flush_pending_writes(conf);
1584
1585                spin_lock_irqsave(&conf->device_lock, flags);
1586                if (list_empty(head)) {
1587                        spin_unlock_irqrestore(&conf->device_lock, flags);
1588                        break;
1589                }
1590                r1_bio = list_entry(head->prev, r1bio_t, retry_list);
1591                list_del(head->prev);
1592                conf->nr_queued--;
1593                spin_unlock_irqrestore(&conf->device_lock, flags);
1594
1595                mddev = r1_bio->mddev;
1596                conf = mddev->private;
1597                if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
1598                        sync_request_write(mddev, r1_bio);
1599                        unplug = 1;
1600                } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
1601                        /* some requests in the r1bio were BIO_RW_BARRIER
1602                         * requests which failed with -EOPNOTSUPP.  Hohumm..
1603                         * Better resubmit without the barrier.
1604                         * We know which devices to resubmit for, because
1605                         * all others have had their bios[] entry cleared.
1606                         * We already have a nr_pending reference on these rdevs.
1607                         */
1608                        int i;
1609                        const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
1610                        clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
1611                        clear_bit(R1BIO_Barrier, &r1_bio->state);
1612                        for (i=0; i < conf->raid_disks; i++)
1613                                if (r1_bio->bios[i])
1614                                        atomic_inc(&r1_bio->remaining);
1615                        for (i=0; i < conf->raid_disks; i++)
1616                                if (r1_bio->bios[i]) {
1617                                        struct bio_vec *bvec;
1618                                        int j;
1619
1620                                        bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
1621                                        /* copy pages from the failed bio, as
1622                                         * this might be a write-behind device */
1623                                        __bio_for_each_segment(bvec, bio, j, 0)
1624                                                bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
1625                                        bio_put(r1_bio->bios[i]);
1626                                        bio->bi_sector = r1_bio->sector +
1627                                                conf->mirrors[i].rdev->data_offset;
1628                                        bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1629                                        bio->bi_end_io = raid1_end_write_request;
1630                                        bio->bi_rw = WRITE |
1631                                                (do_sync << BIO_RW_SYNCIO);
1632                                        bio->bi_private = r1_bio;
1633                                        r1_bio->bios[i] = bio;
1634                                        generic_make_request(bio);
1635                                }
1636                } else {
1637                        int disk;
1638
1639                        /* We got a read error.  Maybe the drive is bad, or maybe it
1640                         * is just this block and we can fix it.
1641                         * We freeze all other IO, and try reading the block from
1642                         * other devices.  When we find one, we re-write the block
1643                         * and check whether that fixes the read error.
1644                         * This is all done synchronously while the array is
1645                         * frozen.
1646                         */
1647                        if (mddev->ro == 0) {
1648                                freeze_array(conf);
1649                                fix_read_error(conf, r1_bio->read_disk,
1650                                               r1_bio->sector,
1651                                               r1_bio->sectors);
1652                                unfreeze_array(conf);
1653                        } else
1654                                md_error(mddev,
1655                                         conf->mirrors[r1_bio->read_disk].rdev);
1656
1657                        bio = r1_bio->bios[r1_bio->read_disk];
1658                        if ((disk=read_balance(conf, r1_bio)) == -1) {
1659                                printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
1660                                       " read error for block %llu\n",
1661                                       bdevname(bio->bi_bdev,b),
1662                                       (unsigned long long)r1_bio->sector);
1663                                raid_end_bio_io(r1_bio);
1664                        } else {
1665                                const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
1666                                r1_bio->bios[r1_bio->read_disk] =
1667                                        mddev->ro ? IO_BLOCKED : NULL;
1668                                r1_bio->read_disk = disk;
1669                                bio_put(bio);
1670                                bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
1671                                r1_bio->bios[r1_bio->read_disk] = bio;
1672                                rdev = conf->mirrors[disk].rdev;
1673                                if (printk_ratelimit())
1674                                        printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
1675                                               " another mirror\n",
1676                                               bdevname(rdev->bdev,b),
1677                                               (unsigned long long)r1_bio->sector);
1678                                bio->bi_sector = r1_bio->sector + rdev->data_offset;
1679                                bio->bi_bdev = rdev->bdev;
1680                                bio->bi_end_io = raid1_end_read_request;
1681                                bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
1682                                bio->bi_private = r1_bio;
1683                                unplug = 1;
1684                                generic_make_request(bio);
1685                        }
1686                }
1687                cond_resched();
1688        }
1689        if (unplug)
1690                unplug_slaves(mddev);
1691}
1692
1693
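    /*
     * Set up the pool of pre-allocated resync buffers (r1bio plus data
     * pages) used by sync_request(); keeping RESYNC_WINDOW worth of buffers
     * lets resync make progress even under memory pressure.
     */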
1694static int init_resync(conf_t *conf)
1695{
1696        int buffs;
1697
1698        buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1699        BUG_ON(conf->r1buf_pool);
1700        conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
1701                                          conf->poolinfo);
1702        if (!conf->r1buf_pool)
1703                return -ENOMEM;
1704        conf->next_resync = 0;
1705        return 0;
1706}
1707
1708/*
1709 * perform a "sync" on one "block"
1710 *
1711 * We need to make sure that no normal I/O request - particularly write
1712 * requests - conflict with active sync requests.
1713 *
1714 * This is achieved by tracking pending requests and a 'barrier' concept
1715 * that can be installed to exclude normal IO requests.
1716 */
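    /*
     * A rough sketch of the interlock (raise_barrier()/lower_barrier() are
     * defined earlier in this file, as are the wait_barrier()/allow_barrier()
     * pair used on the normal IO path):
     *
     *    resync:  raise_barrier(conf);   wait for pending normal IO, block new IO
     *             ... issue resync reads/writes ...
     *             lower_barrier(conf);   allow normal IO to continue
     *
     *    normal IO in make_request() is bracketed by wait_barrier(conf) /
     *    allow_barrier(conf), so it never overlaps an active sync request.
     */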
1717
1718static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1719{
1720        conf_t *conf = mddev->private;
1721        r1bio_t *r1_bio;
1722        struct bio *bio;
1723        sector_t max_sector, nr_sectors;
1724        int disk = -1;
1725        int i;
1726        int wonly = -1;
1727        int write_targets = 0, read_targets = 0;
1728        int sync_blocks;
1729        int still_degraded = 0;
1730
1731        if (!conf->r1buf_pool)
1732        {
1733/*
1734                printk("sync start - bitmap %p\n", mddev->bitmap);
1735*/
1736                if (init_resync(conf))
1737                        return 0;
1738        }
1739
1740        max_sector = mddev->dev_sectors;
1741        if (sector_nr >= max_sector) {
1742                /* If we aborted, we need to abort the
1743                 * sync on the 'current' bitmap chunk (there will
1744                 * only be one in raid1 resync).
1745                 * We can find the current address in mddev->curr_resync.
1746                 */
1747                if (mddev->curr_resync < max_sector) /* aborted */
1748                        bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1749                                                &sync_blocks, 1);
1750                else /* completed sync */
1751                        conf->fullsync = 0;
1752
1753                bitmap_close_sync(mddev->bitmap);
1754                close_sync(conf);
1755                return 0;
1756        }
1757
1758        if (mddev->bitmap == NULL &&
1759            mddev->recovery_cp == MaxSector &&
1760            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1761            conf->fullsync == 0) {
1762                *skipped = 1;
1763                return max_sector - sector_nr;
1764        }
1765        /* Before building a request, check if we can skip these blocks.
1766         * This call to bitmap_start_sync doesn't actually record anything.
1767         */
1768        if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1769            !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1770                /* We can skip this block, and probably several more */
1771                *skipped = 1;
1772                return sync_blocks;
1773        }
1774        /*
1775         * If there is non-resync activity waiting for a turn,
1776         * and resync is going fast enough,
1777         * then let it through before starting on this new sync request.
1778         */
1779        if (!go_faster && conf->nr_waiting)
1780                msleep_interruptible(1000);
1781
1782        bitmap_cond_end_sync(mddev->bitmap, sector_nr);
1783        raise_barrier(conf);
1784
1785        conf->next_resync = sector_nr;
1786
1787        r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
1788        rcu_read_lock();
1789        /*
1790         * If we get a correctable read error during resync or recovery,
1791         * we might want to read from a different device.  So we
1792         * flag all drives that could conceivably be read from for READ,
1793         * and any others (which will be non-In_sync devices) for WRITE.
1794         * If a read fails, we try reading from something else for which READ
1795         * is OK.
1796         */
1797
1798        r1_bio->mddev = mddev;
1799        r1_bio->sector = sector_nr;
1800        r1_bio->state = 0;
1801        set_bit(R1BIO_IsSync, &r1_bio->state);
1802
1803        for (i=0; i < conf->raid_disks; i++) {
1804                mdk_rdev_t *rdev;
1805                bio = r1_bio->bios[i];
1806
1807                /* take from bio_init */
1808                bio->bi_next = NULL;
1809                bio->bi_flags |= 1 << BIO_UPTODATE;
1810                bio->bi_rw = READ;
1811                bio->bi_vcnt = 0;
1812                bio->bi_idx = 0;
1813                bio->bi_phys_segments = 0;
1814                bio->bi_size = 0;
1815                bio->bi_end_io = NULL;
1816                bio->bi_private = NULL;
1817
1818                rdev = rcu_dereference(conf->mirrors[i].rdev);
1819                if (rdev == NULL ||
1820                           test_bit(Faulty, &rdev->flags)) {
1821                        still_degraded = 1;
1822                        continue;
1823                } else if (!test_bit(In_sync, &rdev->flags)) {
1824                        bio->bi_rw = WRITE;
1825                        bio->bi_end_io = end_sync_write;
1826                        write_targets ++;
1827                } else {
1828                        /* may need to read from here */
1829                        bio->bi_rw = READ;
1830                        bio->bi_end_io = end_sync_read;
1831                        if (test_bit(WriteMostly, &rdev->flags)) {
1832                                if (wonly < 0)
1833                                        wonly = i;
1834                        } else {
1835                                if (disk < 0)
1836                                        disk = i;
1837                        }
1838                        read_targets++;
1839                }
1840                atomic_inc(&rdev->nr_pending);
1841                bio->bi_sector = sector_nr + rdev->data_offset;
1842                bio->bi_bdev = rdev->bdev;
1843                bio->bi_private = r1_bio;
1844        }
1845        rcu_read_unlock();
1846        if (disk < 0)
1847                disk = wonly;
1848        r1_bio->read_disk = disk;
1849
1850        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
1851                /* extra read targets are also write targets */
1852                write_targets += read_targets-1;
1853
1854        if (write_targets == 0 || read_targets == 0) {
1855                /* There is nowhere to write, so all non-sync
1856                 * drives must be failed - so we are finished
1857                 */
1858                sector_t rv = max_sector - sector_nr;
1859                *skipped = 1;
1860                put_buf(r1_bio);
1861                return rv;
1862        }
1863
1864        if (max_sector > mddev->resync_max)
1865                max_sector = mddev->resync_max; /* Don't do IO beyond here */
1866        nr_sectors = 0;
1867        sync_blocks = 0;
1868        do {
1869                struct page *page;
1870                int len = PAGE_SIZE;
1871                if (sector_nr + (len>>9) > max_sector)
1872                        len = (max_sector - sector_nr) << 9;
1873                if (len == 0)
1874                        break;
1875                if (sync_blocks == 0) {
1876                        if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1877                                               &sync_blocks, still_degraded) &&
1878                            !conf->fullsync &&
1879                            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1880                                break;
1881                        BUG_ON(sync_blocks < (PAGE_SIZE>>9));
1882                        if (len > (sync_blocks<<9))
1883                                len = sync_blocks<<9;
1884                }
1885
1886                for (i=0 ; i < conf->raid_disks; i++) {
1887                        bio = r1_bio->bios[i];
1888                        if (bio->bi_end_io) {
1889                                page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
1890                                if (bio_add_page(bio, page, len, 0) == 0) {
1891                                        /* stop here */
1892                                        bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
1893                                        while (i > 0) {
1894                                                i--;
1895                                                bio = r1_bio->bios[i];
1896                                                if (bio->bi_end_io==NULL)
1897                                                        continue;
1898                                                /* remove last page from this bio */
1899                                                bio->bi_vcnt--;
1900                                                bio->bi_size -= len;
1901                                                bio->bi_flags &= ~(1<< BIO_SEG_VALID);
1902                                        }
1903                                        goto bio_full;
1904                                }
1905                        }
1906                }
1907                nr_sectors += len>>9;
1908                sector_nr += len>>9;
1909                sync_blocks -= (len>>9);
1910        } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
1911 bio_full:
1912        r1_bio->sectors = nr_sectors;
1913
1914        /* For a user-requested sync, we read all readable devices and do a
1915         * compare
1916         */
1917        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1918                atomic_set(&r1_bio->remaining, read_targets);
1919                for (i=0; i<conf->raid_disks; i++) {
1920                        bio = r1_bio->bios[i];
1921                        if (bio->bi_end_io == end_sync_read) {
1922                                md_sync_acct(bio->bi_bdev, nr_sectors);
1923                                generic_make_request(bio);
1924                        }
1925                }
1926        } else {
1927                atomic_set(&r1_bio->remaining, 1);
1928                bio = r1_bio->bios[r1_bio->read_disk];
1929                md_sync_acct(bio->bi_bdev, nr_sectors);
1930                generic_make_request(bio);
1931
1932        }
1933        return nr_sectors;
1934}
1935
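    /*
     * With mirroring the array provides exactly one device's worth of
     * space, so the array size is just the per-device size regardless of
     * the number of mirrors.
     */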
1936static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks)
1937{
1938        if (sectors)
1939                return sectors;
1940
1941        return mddev->dev_sectors;
1942}
1943
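    /*
     * Assemble the array: allocate conf and the r1bio mempool, hook each
     * verified rdev into conf->mirrors[], work out how many mirrors are
     * missing or out of date, pick an initial disk for read balancing and
     * start the raid1d management thread.
     */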
1944static int run(mddev_t *mddev)
1945{
1946        conf_t *conf;
1947        int i, j, disk_idx;
1948        mirror_info_t *disk;
1949        mdk_rdev_t *rdev;
1950
1951        if (mddev->level != 1) {
1952                printk(KERN_ERR "raid1: %s: raid level not set to mirroring (%d)\n",
1953                       mdname(mddev), mddev->level);
1954                goto out;
1955        }
1956        if (mddev->reshape_position != MaxSector) {
1957                printk(KERN_ERR "raid1: %s: reshape_position set but not supported\n",
1958                       mdname(mddev));
1959                goto out;
1960        }
1961        /*
1962         * copy the already verified devices into our private RAID1
1963         * bookkeeping area. [whatever we allocate in run(),
1964         * should be freed in stop()]
1965         */
1966        conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
1967        mddev->private = conf;
1968        if (!conf)
1969                goto out_no_mem;
1970
1971        conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1972                                 GFP_KERNEL);
1973        if (!conf->mirrors)
1974                goto out_no_mem;
1975
1976        conf->tmppage = alloc_page(GFP_KERNEL);
1977        if (!conf->tmppage)
1978                goto out_no_mem;
1979
1980        conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1981        if (!conf->poolinfo)
1982                goto out_no_mem;
1983        conf->poolinfo->mddev = NULL;
1984        conf->poolinfo->raid_disks = mddev->raid_disks;
1985        conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
1986                                          r1bio_pool_free,
1987                                          conf->poolinfo);
1988        if (!conf->r1bio_pool)
1989                goto out_no_mem;
1990        conf->poolinfo->mddev = mddev;
1991
1992        spin_lock_init(&conf->device_lock);
1993        mddev->queue->queue_lock = &conf->device_lock;
1994
1995        list_for_each_entry(rdev, &mddev->disks, same_set) {
1996                disk_idx = rdev->raid_disk;
1997                if (disk_idx >= mddev->raid_disks
1998                    || disk_idx < 0)
1999                        continue;
2000                disk = conf->mirrors + disk_idx;
2001
2002                disk->rdev = rdev;
2003                disk_stack_limits(mddev->gendisk, rdev->bdev,
2004                                  rdev->data_offset << 9);
2005                /* as we don't honour merge_bvec_fn, we must never risk
2006                 * violating it, so limit ->max_sectors to one PAGE, as
2007                 * a one page request is never in violation.
2008                 */
2009                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2010                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2011                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2012
2013                disk->head_position = 0;
2014        }
2015        conf->raid_disks = mddev->raid_disks;
2016        conf->mddev = mddev;
2017        INIT_LIST_HEAD(&conf->retry_list);
2018
2019        spin_lock_init(&conf->resync_lock);
2020        init_waitqueue_head(&conf->wait_barrier);
2021
2022        bio_list_init(&conf->pending_bio_list);
2023        bio_list_init(&conf->flushing_bio_list);
2024
2025
2026        mddev->degraded = 0;
2027        for (i = 0; i < conf->raid_disks; i++) {
2028
2029                disk = conf->mirrors + i;
2030
2031                if (!disk->rdev ||
2032                    !test_bit(In_sync, &disk->rdev->flags)) {
2033                        disk->head_position = 0;
2034                        mddev->degraded++;
2035                        if (disk->rdev)
2036                                conf->fullsync = 1;
2037                }
2038        }
2039        if (mddev->degraded == conf->raid_disks) {
2040                printk(KERN_ERR "raid1: no operational mirrors for %s\n",
2041                        mdname(mddev));
2042                goto out_free_conf;
2043        }
2044        if (conf->raid_disks - mddev->degraded == 1)
2045                mddev->recovery_cp = MaxSector;
2046
2047        /*
2048         * find the first working one and use it as a starting point
2049         * for read balancing.
2050         */
2051        for (j = 0; j < conf->raid_disks &&
2052                     (!conf->mirrors[j].rdev ||
2053                      !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
2054                /* nothing */;
2055        conf->last_used = j;
2056
2057
2058        mddev->thread = md_register_thread(raid1d, mddev, NULL);
2059        if (!mddev->thread) {
2060                printk(KERN_ERR
2061                       "raid1: couldn't allocate thread for %s\n",
2062                       mdname(mddev));
2063                goto out_free_conf;
2064        }
2065
2066        if (mddev->recovery_cp != MaxSector)
2067                printk(KERN_NOTICE "raid1: %s is not clean"
2068                       " -- starting background reconstruction\n",
2069                       mdname(mddev));
2070        printk(KERN_INFO 
2071                "raid1: raid set %s active with %d out of %d mirrors\n",
2072                mdname(mddev), mddev->raid_disks - mddev->degraded, 
2073                mddev->raid_disks);
2074        /*
2075         * Ok, everything is just fine now
2076         */
2077        md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2078
2079        mddev->queue->unplug_fn = raid1_unplug;
2080        mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2081        mddev->queue->backing_dev_info.congested_data = mddev;
2082        md_integrity_register(mddev);
2083        return 0;
2084
2085out_no_mem:
2086        printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
2087               mdname(mddev));
2088
2089out_free_conf:
2090        if (conf) {
2091                if (conf->r1bio_pool)
2092                        mempool_destroy(conf->r1bio_pool);
2093                kfree(conf->mirrors);
2094                safe_put_page(conf->tmppage);
2095                kfree(conf->poolinfo);
2096                kfree(conf);
2097                mddev->private = NULL;
2098        }
2099out:
2100        return -EIO;
2101}
2102
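    /*
     * Tear down what run() built: wait for any write-behind IO to drain,
     * flush stragglers through a raise/lower barrier cycle, stop the raid1d
     * thread and free the pools and bookkeeping.
     */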
2103static int stop(mddev_t *mddev)
2104{
2105        conf_t *conf = mddev->private;
2106        struct bitmap *bitmap = mddev->bitmap;
2107        int behind_wait = 0;
2108
2109        /* wait for behind writes to complete */
2110        while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2111                behind_wait++;
2112                printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
2113                set_current_state(TASK_UNINTERRUPTIBLE);
2114                schedule_timeout(HZ); /* wait a second */
2115                /* need to kick something here to make sure I/O goes? */
2116        }
2117
2118        raise_barrier(conf);
2119        lower_barrier(conf);
2120
2121        md_unregister_thread(mddev->thread);
2122        mddev->thread = NULL;
2123        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2124        if (conf->r1bio_pool)
2125                mempool_destroy(conf->r1bio_pool);
2126        kfree(conf->mirrors);
2127        kfree(conf->poolinfo);
2128        kfree(conf);
2129        mddev->private = NULL;
2130        return 0;
2131}
2132
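    /*
     * Change the amount of each device that is used.  When a clean array
     * grows, recovery_cp is pulled back to the old size so the new space
     * gets resynced; shrinking just trims the array and resync limits.
     */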
2133static int raid1_resize(mddev_t *mddev, sector_t sectors)
2134{
2135        /* no resync is happening, and there is enough space
2136         * on all devices, so we can resize.
2137         * We need to make sure resync covers any new space.
2138         * If the array is shrinking we should possibly wait until
2139         * any io in the removed space completes, but it hardly seems
2140         * worth it.
2141         */
2142        md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0));
2143        if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
2144                return -EINVAL;
2145        set_capacity(mddev->gendisk, mddev->array_sectors);
2146        mddev->changed = 1;
2147        revalidate_disk(mddev->gendisk);
2148        if (sectors > mddev->dev_sectors &&
2149            mddev->recovery_cp == MaxSector) {
2150                mddev->recovery_cp = mddev->dev_sectors;
2151                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2152        }
2153        mddev->dev_sectors = sectors;
2154        mddev->resync_max_sectors = sectors;
2155        return 0;
2156}
2157
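    /*
     * For RAID1 the only supported "reshape" is changing the number of
     * devices (delta_disks); chunk size, layout and level must stay as they
     * are, and the change is rejected if more devices are in use than the
     * new count allows.
     */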
2158static int raid1_reshape(mddev_t *mddev)
2159{
2160        /* We need to:
2161         * 1/ resize the r1bio_pool
2162         * 2/ resize conf->mirrors
2163         *
2164         * We allocate a new r1bio_pool if we can.
2165         * Then raise a device barrier and wait until all IO stops.
2166         * Then resize conf->mirrors and swap in the new r1bio pool.
2167         *
2168         * At the same time, we "pack" the devices so that all the missing
2169         * devices have the higher raid_disk numbers.
2170         */
2171        mempool_t *newpool, *oldpool;
2172        struct pool_info *newpoolinfo;
2173        mirror_info_t *newmirrors;
2174        conf_t *conf = mddev->private;
2175        int cnt, raid_disks;
2176        unsigned long flags;
2177        int d, d2, err;
2178
2179        /* Cannot change chunk_size, layout, or level */
2180        if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
2181            mddev->layout != mddev->new_layout ||
2182            mddev->level != mddev->new_level) {
2183                mddev->new_chunk_sectors = mddev->chunk_sectors;
2184                mddev->new_layout = mddev->layout;
2185                mddev->new_level = mddev->level;
2186                return -EINVAL;
2187        }
2188
2189        err = md_allow_write(mddev);
2190        if (err)
2191                return err;
2192
2193        raid_disks = mddev->raid_disks + mddev->delta_disks;
2194
2195        if (raid_disks < conf->raid_disks) {
2196                cnt=0;
2197                for (d= 0; d < conf->raid_disks; d++)
2198                        if (conf->mirrors[d].rdev)
2199                                cnt++;
2200                if (cnt > raid_disks)
2201                        return -EBUSY;
2202        }
2203
2204        newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2205        if (!newpoolinfo)
2206                return -ENOMEM;
2207        newpoolinfo->mddev = mddev;
2208        newpoolinfo->raid_disks = raid_disks;
2209
2210        newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2211                                 r1bio_pool_free, newpoolinfo);
2212        if (!newpool) {
2213                kfree(newpoolinfo);
2214                return -ENOMEM;
2215        }
2216        newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
2217        if (!newmirrors) {
2218                kfree(newpoolinfo);
2219                mempool_destroy(newpool);
2220                return -ENOMEM;
2221        }
2222
2223        raise_barrier(conf);
2224
2225        /* ok, everything is stopped */
2226        oldpool = conf->r1bio_pool;
2227        conf->r1bio_pool = newpool;
2228
2229        for (d = d2 = 0; d < conf->raid_disks; d++) {
2230                mdk_rdev_t *rdev = conf->mirrors[d].rdev;
2231                if (rdev && rdev->raid_disk != d2) {
2232                        char nm[20];
2233                        sprintf(nm, "rd%d", rdev->raid_disk);
2234                        sysfs_remove_link(&mddev->kobj, nm);
2235                        rdev->raid_disk = d2;
2236                        sprintf(nm, "rd%d", rdev->raid_disk);
2237                        sysfs_remove_link(&mddev->kobj, nm);
2238                        if (sysfs_create_link(&mddev->kobj,
2239                                              &rdev->kobj, nm))
2240                                printk(KERN_WARNING
2241                                       "md/raid1: cannot register "
2242                                       "%s for %s\n",
2243                                       nm, mdname(mddev));
2244                }
2245                if (rdev)
2246                        newmirrors[d2++].rdev = rdev;
2247        }
2248        kfree(conf->mirrors);
2249        conf->mirrors = newmirrors;
2250        kfree(conf->poolinfo);
2251        conf->poolinfo = newpoolinfo;
2252
2253        spin_lock_irqsave(&conf->device_lock, flags);
2254        mddev->degraded += (raid_disks - conf->raid_disks);
2255        spin_unlock_irqrestore(&conf->device_lock, flags);
2256        conf->raid_disks = mddev->raid_disks = raid_disks;
2257        mddev->delta_disks = 0;
2258
2259        conf->last_used = 0; /* just make sure it is in-range */
2260        lower_barrier(conf);
2261
2262        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2263        md_wakeup_thread(mddev->thread);
2264
2265        mempool_destroy(oldpool);
2266        return 0;
2267}
2268
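    /*
     * Quiesce hook for the md core: state 1 raises the resync barrier to
     * block all array IO, state 0 lowers it again.
     */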
2269static void raid1_quiesce(mddev_t *mddev, int state)
2270{
2271        conf_t *conf = mddev->private;
2272
2273        switch(state) {
2274        case 1:
2275                raise_barrier(conf);
2276                break;
2277        case 0:
2278                lower_barrier(conf);
2279                break;
2280        }
2281}
2282
2283
2284static struct mdk_personality raid1_personality =
2285{
2286        .name           = "raid1",
2287        .level          = 1,
2288        .owner          = THIS_MODULE,
2289        .make_request   = make_request,
2290        .run            = run,
2291        .stop           = stop,
2292        .status         = status,
2293        .error_handler  = error,
2294        .hot_add_disk   = raid1_add_disk,
2295        .hot_remove_disk= raid1_remove_disk,
2296        .spare_active   = raid1_spare_active,
2297        .sync_request   = sync_request,
2298        .resize         = raid1_resize,
2299        .size           = raid1_size,
2300        .check_reshape  = raid1_reshape,
2301        .quiesce        = raid1_quiesce,
2302};
2303
2304static int __init raid_init(void)
2305{
2306        return register_md_personality(&raid1_personality);
2307}
2308
2309static void raid_exit(void)
2310{
2311        unregister_md_personality(&raid1_personality);
2312}
2313
2314module_init(raid_init);
2315module_exit(raid_exit);
2316MODULE_LICENSE("GPL");
2317MODULE_ALIAS("md-personality-3"); /* RAID1 */
2318MODULE_ALIAS("md-raid1");
2319MODULE_ALIAS("md-level-1");
2320