linux/drivers/md/raid0.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

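/*
 * Layout to assume for a multi-zone array whose superblock does not
 * record one: 1 = RAID0_ORIG_LAYOUT, 2 = RAID0_ALT_MULTIZONE_LAYOUT.
 * With the default of 0, assembling such an array is refused (see
 * create_strip_zones() below).
 */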
static int default_layout = 0;
module_param(default_layout, int, 0644);

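/*
 * mddev flags that make no sense on a raid0 array; the takeover paths
 * clear them via mddev_clear_unsupported_flags().
 */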
#define UNSUPPORTED_MDDEV_FLAGS         \
        ((1L << MD_HAS_JOURNAL) |       \
         (1L << MD_JOURNAL_CLEAN) |     \
         (1L << MD_FAILFAST_SUPPORTED) |\
         (1L << MD_HAS_PPL) |           \
         (1L << MD_HAS_MULTIPLE_PPLS))

/*
 * Inform the user of the RAID configuration.
 */
static void dump_zones(struct mddev *mddev)
{
        int j, k;
        sector_t zone_size = 0;
        sector_t zone_start = 0;
        struct r0conf *conf = mddev->private;
        int raid_disks = conf->strip_zone[0].nb_dev;

        pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
                 mdname(mddev),
                 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
        for (j = 0; j < conf->nr_strip_zones; j++) {
                char line[200];
                int len = 0;

                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        len += snprintf(line+len, 200-len, "%s%pg",
                                        k ? "/" : "",
                                        conf->devlist[j * raid_disks + k]->bdev);
                pr_debug("md: zone%d=[%s]\n", j, line);

                zone_size = conf->strip_zone[j].zone_end - zone_start;
                pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
                        (unsigned long long)zone_start>>1,
                        (unsigned long long)conf->strip_zone[j].dev_start>>1,
                        (unsigned long long)zone_size>>1);
                zone_start = conf->strip_zone[j].zone_end;
        }
}

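/*
 * A raid0 array is carved into "strip zones".  Zone 0 stripes across every
 * member up to the capacity of the smallest device; each further zone
 * stripes across only the devices that still have room, up to the
 * next-smallest capacity.
 *
 * Illustrative example (sizes in sectors): three members of 100, 100 and
 * 200.  Zone 0 stripes the first 100 sectors of all three devices
 * (zone_end = 300); zone 1 is the remaining 100 sectors of the large
 * device alone (nb_dev = 1, zone_end = 400).
 */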
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
        int i, c, err;
        sector_t curr_zone_end, sectors;
        struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
        struct strip_zone *zone;
        int cnt;
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        unsigned int blksize = 512;

        *private_conf = ERR_PTR(-ENOMEM);
        if (!conf)
                return -ENOMEM;
        rdev_for_each(rdev1, mddev) {
                pr_debug("md/raid0:%s: looking at %pg\n",
                         mdname(mddev),
                         rdev1->bdev);
                c = 0;

                /* round size to chunk_size */
                sectors = rdev1->sectors;
                sector_div(sectors, mddev->chunk_sectors);
                rdev1->sectors = sectors * mddev->chunk_sectors;

                blksize = max(blksize, queue_logical_block_size(
                                      rdev1->bdev->bd_disk->queue));

                rdev_for_each(rdev2, mddev) {
                        pr_debug("md/raid0:%s:   comparing %pg(%llu) with %pg(%llu)\n",
                                 mdname(mddev),
                                 rdev1->bdev,
                                 (unsigned long long)rdev1->sectors,
                                 rdev2->bdev,
                                 (unsigned long long)rdev2->sectors);
                        if (rdev2 == rdev1) {
                                pr_debug("md/raid0:%s:   END\n",
                                         mdname(mddev));
                                break;
                        }
                        if (rdev2->sectors == rdev1->sectors) {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
                                pr_debug("md/raid0:%s:   EQUAL\n",
                                         mdname(mddev));
                                c = 1;
                                break;
                        }
                        pr_debug("md/raid0:%s:   NOT EQUAL\n",
                                 mdname(mddev));
                }
                if (!c) {
                        pr_debug("md/raid0:%s:   ==> UNIQUE\n",
                                 mdname(mddev));
                        conf->nr_strip_zones++;
                        pr_debug("md/raid0:%s: %d zones\n",
                                 mdname(mddev), conf->nr_strip_zones);
                }
        }
        pr_debug("md/raid0:%s: FINAL %d zones\n",
                 mdname(mddev), conf->nr_strip_zones);

        /*
         * now since we have the hard sector sizes, we can make sure
         * chunk size is a multiple of that sector size
         */
        if ((mddev->chunk_sectors << 9) % blksize) {
                pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
                        mdname(mddev),
                        mddev->chunk_sectors << 9, blksize);
                err = -EINVAL;
                goto abort;
        }

        err = -ENOMEM;
        conf->strip_zone = kcalloc(conf->nr_strip_zones,
                                   sizeof(struct strip_zone),
                                   GFP_KERNEL);
        if (!conf->strip_zone)
                goto abort;
        conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
                                            conf->nr_strip_zones,
                                            mddev->raid_disks),
                                GFP_KERNEL);
        if (!conf->devlist)
                goto abort;

        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
         */
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
        dev = conf->devlist;
        err = -EINVAL;
        rdev_for_each(rdev1, mddev) {
                int j = rdev1->raid_disk;

                if (mddev->level == 10) {
                        /* taking over a raid10-n2 array */
                        j /= 2;
                        rdev1->new_raid_disk = j;
                }

                if (mddev->level == 1) {
                        /* taking over a raid1 array -
                         * we have only one active disk
                         */
                        j = 0;
                        rdev1->new_raid_disk = j;
                }

                if (j < 0) {
                        pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
                                mdname(mddev));
                        goto abort;
                }
                if (j >= mddev->raid_disks) {
                        pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
                                mdname(mddev), j);
                        goto abort;
                }
                if (dev[j]) {
                        pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
                                mdname(mddev), j);
                        goto abort;
                }
                dev[j] = rdev1;

                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
        }
        if (cnt != mddev->raid_disks) {
                pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
                        mdname(mddev), cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
        zone->zone_end = smallest->sectors * cnt;

        curr_zone_end = zone->zone_end;

        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++) {
                int j;

                zone = conf->strip_zone + i;
                dev = conf->devlist + i * mddev->raid_disks;

                pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
                zone->dev_start = smallest->sectors;
                smallest = NULL;
                c = 0;

                for (j = 0; j < cnt; j++) {
                        rdev = conf->devlist[j];
                        if (rdev->sectors <= zone->dev_start) {
                                pr_debug("md/raid0:%s: checking %pg ... nope\n",
                                         mdname(mddev),
                                         rdev->bdev);
                                continue;
                        }
                        pr_debug("md/raid0:%s: checking %pg ... contained as device %d\n",
                                 mdname(mddev),
                                 rdev->bdev, c);
                        dev[c] = rdev;
                        c++;
                        if (!smallest || rdev->sectors < smallest->sectors) {
                                smallest = rdev;
                                pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
                                         mdname(mddev),
                                         (unsigned long long)rdev->sectors);
                        }
                }

                zone->nb_dev = c;
                sectors = (smallest->sectors - zone->dev_start) * c;
                pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
                         mdname(mddev),
                         zone->nb_dev, (unsigned long long)sectors);

                curr_zone_end += sectors;
                zone->zone_end = curr_zone_end;

                pr_debug("md/raid0:%s: current zone start: %llu\n",
                         mdname(mddev),
                         (unsigned long long)smallest->sectors);
        }

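        /*
         * Multi-zone arrays historically had two incompatible layouts.
         * RAID0_ORIG_LAYOUT picks the member device from the absolute
         * array sector, RAID0_ALT_MULTIZONE_LAYOUT from the zone-relative
         * sector (see the switch in raid0_make_request()).  Both place
         * data identically in zone 0, so single-zone arrays, and arrays
         * whose later zones hold a single device, need no layout choice.
         */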
        if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
                conf->layout = RAID0_ORIG_LAYOUT;
        } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
                   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
                conf->layout = mddev->layout;
        } else if (default_layout == RAID0_ORIG_LAYOUT ||
                   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
                conf->layout = default_layout;
        } else {
                pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
                       mdname(mddev));
                pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
                err = -EOPNOTSUPP;
                goto abort;
        }

        pr_debug("md/raid0:%s: done.\n", mdname(mddev));
        *private_conf = conf;

        return 0;
abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        *private_conf = ERR_PTR(err);
        return err;
}

/*
 * Find the zone which holds a particular offset and update *sectorp to
 * be an offset within that zone.
 */
static struct strip_zone *find_zone(struct r0conf *conf,
                                    sector_t *sectorp)
{
        int i;
        struct strip_zone *z = conf->strip_zone;
        sector_t sector = *sectorp;

        for (i = 0; i < conf->nr_strip_zones; i++)
                if (sector < z[i].zone_end) {
                        if (i)
                                *sectorp = sector - z[i-1].zone_end;
                        return z + i;
                }
        BUG();
}

/*
 * Remap the bio to the target device.  We separate two flows, a
 * power-of-2 one and a general one, for the sake of performance.
 */
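/*
 * Illustrative example (power-of-2 path): chunk_sects = 128 and a zone of
 * nb_dev = 2, in zone 0 where the array sector and the zone-relative
 * offset coincide.  For sector 1000: sect_in_chunk = 1000 & 127 = 104,
 * array chunk = 1000 >> 7 = 7, so the bio goes to device 7 % 2 = 1; the
 * per-device chunk is 1000 / (2 * 128) = 3, giving a device offset of
 * 3 * 128 + 104 = 488 (zone->dev_start is added by the caller).
 */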
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
                                sector_t sector, sector_t *sector_offset)
{
        unsigned int sect_in_chunk;
        sector_t chunk;
        struct r0conf *conf = mddev->private;
        int raid_disks = conf->strip_zone[0].nb_dev;
        unsigned int chunk_sects = mddev->chunk_sectors;

        if (is_power_of_2(chunk_sects)) {
                int chunksect_bits = ffz(~chunk_sects);

                /* find the sector offset inside the chunk */
                sect_in_chunk = sector & (chunk_sects - 1);
                sector >>= chunksect_bits;
                /* chunk in zone */
                chunk = *sector_offset;
                /* quotient is the chunk in real device */
                sector_div(chunk, zone->nb_dev << chunksect_bits);
        } else {
                sect_in_chunk = sector_div(sector, chunk_sects);
                chunk = *sector_offset;
                sector_div(chunk, chunk_sects * zone->nb_dev);
        }
        /*
         * Position the bio over the real device:
         * real sector = chunk in device + start of zone
         *      + the position in the chunk
         */
        *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
        return conf->devlist[(zone - conf->strip_zone)*raid_disks
                             + sector_div(sector, zone->nb_dev)];
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
        sector_t array_sectors = 0;
        struct md_rdev *rdev;

        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);

        rdev_for_each(rdev, mddev)
                array_sectors += (rdev->sectors &
                                  ~(sector_t)(mddev->chunk_sectors-1));

        return array_sectors;
}

static void free_conf(struct mddev *mddev, struct r0conf *conf)
{
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
}

static void raid0_free(struct mddev *mddev, void *priv)
{
        struct r0conf *conf = priv;

        free_conf(mddev, conf);
        acct_bioset_exit(mddev);
}

static int raid0_run(struct mddev *mddev)
{
        struct r0conf *conf;
        int ret;

        if (mddev->chunk_sectors == 0) {
                pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
                return -EINVAL;
        }
        if (md_check_no_bitmap(mddev))
                return -EINVAL;

        if (acct_bioset_init(mddev)) {
                pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
                return -ENOMEM;
        }

        /* if private is not null, we are here after takeover */
        if (mddev->private == NULL) {
                ret = create_strip_zones(mddev, &conf);
                if (ret < 0)
                        goto exit_acct_set;
                mddev->private = conf;
        }
        conf = mddev->private;
        if (mddev->queue) {
                struct md_rdev *rdev;

                blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

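                /*
                 * Advertise one chunk as the minimum preferred I/O size
                 * and a full stripe (chunk * nr disks) as the optimal one.
                 */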
                blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
                blk_queue_io_opt(mddev->queue,
                                 (mddev->chunk_sectors << 9) * mddev->raid_disks);

                rdev_for_each(rdev, mddev) {
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
                }
        }

        /* calculate array device size */
        md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

        pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
                 mdname(mddev),
                 (unsigned long long)mddev->array_sectors);

        dump_zones(mddev);

        ret = md_integrity_register(mddev);
        if (ret)
                goto free;

        return ret;

free:
        free_conf(mddev, conf);
exit_acct_set:
        acct_bioset_exit(mddev);
        return ret;
}

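/*
 * A discard may cover many chunks, so rather than splitting it chunk by
 * chunk (as raid0_make_request() does for regular I/O), compute for each
 * member disk the contiguous device range the request covers within this
 * zone and submit a single discard bio per disk.
 */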
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
        struct r0conf *conf = mddev->private;
        struct strip_zone *zone;
        sector_t start = bio->bi_iter.bi_sector;
        sector_t end;
        unsigned int stripe_size;
        sector_t first_stripe_index, last_stripe_index;
        sector_t start_disk_offset;
        unsigned int start_disk_index;
        sector_t end_disk_offset;
        unsigned int end_disk_index;
        unsigned int disk;

        zone = find_zone(conf, &start);

        if (bio_end_sector(bio) > zone->zone_end) {
                struct bio *split = bio_split(bio,
                        zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
                        &mddev->bio_set);

                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
                end = zone->zone_end;
        } else {
                end = bio_end_sector(bio);
        }

        if (zone != conf->strip_zone)
                end = end - zone[-1].zone_end;

        /* Now start and end are offsets within the zone */
        stripe_size = zone->nb_dev * mddev->chunk_sectors;

        first_stripe_index = start;
        sector_div(first_stripe_index, stripe_size);
        last_stripe_index = end;
        sector_div(last_stripe_index, stripe_size);

        start_disk_index = (int)(start - first_stripe_index * stripe_size) /
                mddev->chunk_sectors;
        start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
                mddev->chunk_sectors) +
                first_stripe_index * mddev->chunk_sectors;
        end_disk_index = (int)(end - last_stripe_index * stripe_size) /
                mddev->chunk_sectors;
        end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
                mddev->chunk_sectors) +
                last_stripe_index * mddev->chunk_sectors;

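        /*
         * Disks below start_disk_index were already passed by the request
         * start within the first stripe, so their discard begins in the
         * next stripe; disks beyond it begin in the same stripe; the
         * boundary disk itself starts mid-chunk at start_disk_offset.
         * dev_end is derived symmetrically from the last affected chunk.
         */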
        for (disk = 0; disk < zone->nb_dev; disk++) {
                sector_t dev_start, dev_end;
                struct md_rdev *rdev;

                if (disk < start_disk_index)
                        dev_start = (first_stripe_index + 1) *
                                mddev->chunk_sectors;
                else if (disk > start_disk_index)
                        dev_start = first_stripe_index * mddev->chunk_sectors;
                else
                        dev_start = start_disk_offset;

                if (disk < end_disk_index)
                        dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
                else if (disk > end_disk_index)
                        dev_end = last_stripe_index * mddev->chunk_sectors;
                else
                        dev_end = end_disk_offset;

                if (dev_end <= dev_start)
                        continue;

                rdev = conf->devlist[(zone - conf->strip_zone) *
                        conf->strip_zone[0].nb_dev + disk];
                md_submit_discard_bio(mddev, rdev, bio,
                        dev_start + zone->dev_start + rdev->data_offset,
                        dev_end - dev_start);
        }
        bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
        struct r0conf *conf = mddev->private;
        struct strip_zone *zone;
        struct md_rdev *tmp_dev;
        sector_t bio_sector;
        sector_t sector;
        sector_t orig_sector;
        unsigned int chunk_sects;
        unsigned int sectors;

        if (unlikely(bio->bi_opf & REQ_PREFLUSH)
            && md_flush_request(mddev, bio))
                return true;

        if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
                raid0_handle_discard(mddev, bio);
                return true;
        }

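        /*
         * Compute how many sectors remain up to the next chunk boundary;
         * anything past it is split off and resubmitted so that each bio
         * ends up within a single chunk on a single device.
         */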
        bio_sector = bio->bi_iter.bi_sector;
        sector = bio_sector;
        chunk_sects = mddev->chunk_sectors;

        sectors = chunk_sects -
                (likely(is_power_of_2(chunk_sects))
                 ? (sector & (chunk_sects-1))
                 : sector_div(sector, chunk_sects));

        /* Restore due to sector_div */
        sector = bio_sector;

        if (sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, sectors, GFP_NOIO,
                                              &mddev->bio_set);
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
        }

        if (bio->bi_pool != &mddev->bio_set)
                md_account_bio(mddev, &bio);

        orig_sector = sector;
        zone = find_zone(mddev->private, &sector);
        switch (conf->layout) {
        case RAID0_ORIG_LAYOUT:
                tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
                break;
        case RAID0_ALT_MULTIZONE_LAYOUT:
                tmp_dev = map_sector(mddev, zone, sector, &sector);
                break;
        default:
                WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
                bio_io_error(bio);
                return true;
        }

        if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
                bio_io_error(bio);
                return true;
        }

        bio_set_dev(bio, tmp_dev->bdev);
        bio->bi_iter.bi_sector = sector + zone->dev_start +
                tmp_dev->data_offset;

        if (mddev->gendisk)
                trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
                                      bio_sector);
        mddev_check_write_zeroes(mddev, bio);
        submit_bio_noacct(bio);
        return true;
}


static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
        seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
        struct md_rdev *rdev;
        struct r0conf *priv_conf;

        if (mddev->degraded != 1) {
                pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
                        mdname(mddev),
                        mddev->degraded);
                return ERR_PTR(-EINVAL);
        }

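        /*
         * With ALGORITHM_PARITY_N the parity blocks live on the last
         * device, so a populated slot raid_disks - 1 means parity data
         * is still present and the array cannot be taken over to raid0.
         */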
        rdev_for_each(rdev, mddev) {
                /* check slot number for a disk */
                if (rdev->raid_disk == mddev->raid_disks-1) {
                        pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
                                mdname(mddev));
                        return ERR_PTR(-EINVAL);
                }
                rdev->sectors = mddev->dev_sectors;
        }

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->raid_disks--;
        mddev->delta_disks = -1;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;
        mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

        create_strip_zones(mddev, &priv_conf);

        return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
        struct r0conf *priv_conf;

        /* Check layout:
         *  - far_copies must be 1
         *  - near_copies must be 2
         *  - number of disks must be even
         *  - all mirrors must be already degraded
         */
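        /* 0x102: near_copies = 2 in bits 7:0, far_copies = 1 in bits 15:8 */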
        if (mddev->layout != ((1 << 8) + 2)) {
                pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
                        mdname(mddev),
                        mddev->layout);
                return ERR_PTR(-EINVAL);
        }
        if (mddev->raid_disks & 1) {
                pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
                        mdname(mddev));
                return ERR_PTR(-EINVAL);
        }
        if (mddev->degraded != (mddev->raid_disks>>1)) {
                pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
                        mdname(mddev));
                return ERR_PTR(-EINVAL);
        }

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->delta_disks = - mddev->raid_disks / 2;
        mddev->raid_disks += mddev->delta_disks;
        mddev->degraded = 0;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;
        mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
        struct r0conf *priv_conf;
        int chunksect;

        /* Check layout:
         *  - (N - 1) mirror drives must already be faulty
         */
        if ((mddev->raid_disks - 1) != mddev->degraded) {
                pr_err("md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }

        /*
         * A raid1 doesn't have the notion of chunk size, so
         * figure out the largest suitable size we can use.
         */
        chunksect = 64 * 2; /* 64K by default */

        /* The array size must be an exact multiple of the chunk size */
        while (chunksect && (mddev->array_sectors & (chunksect - 1)))
                chunksect >>= 1;
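
        /*
         * Illustrative example: for an array of 1000 sectors the loop
         * halves chunksect 128 -> 64 -> 32 -> 16 -> 8, since 8 is the
         * largest power of two dividing 1000; 8 sectors = 4KiB, which
         * still passes the PAGE_SIZE check below on 4KiB-page systems.
         */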

        if ((chunksect << 9) < PAGE_SIZE)
                /* array size does not allow a suitable chunk size */
                return ERR_PTR(-EINVAL);

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = chunksect;
        mddev->chunk_sectors = chunksect;
        mddev->delta_disks = 1 - mddev->raid_disks;
        mddev->raid_disks = 1;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;
        mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
        /* raid0 can take over:
         *  raid4 - if all data disks are active.
         *  raid5 - provided it has a raid4-style layout and one disk is faulty
         *  raid10 - assuming we have all necessary active disks
         *  raid1 - with (N - 1) mirror drives faulty
         */

        if (mddev->bitmap) {
                pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
                        mdname(mddev));
                return ERR_PTR(-EBUSY);
        }
        if (mddev->level == 4)
                return raid0_takeover_raid45(mddev);

        if (mddev->level == 5) {
                if (mddev->layout == ALGORITHM_PARITY_N)
                        return raid0_takeover_raid45(mddev);

                pr_warn("md/raid0:%s: Raid0 can only take over Raid5 with layout: %d\n",
                        mdname(mddev), ALGORITHM_PARITY_N);
        }

        if (mddev->level == 10)
                return raid0_takeover_raid10(mddev);

        if (mddev->level == 1)
                return raid0_takeover_raid1(mddev);

        pr_warn("Takeover from raid%i to raid0 not supported\n",
                mddev->level);

        return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .free           = raid0_free,
        .status         = raid0_status,
        .size           = raid0_size,
        .takeover       = raid0_takeover,
        .quiesce        = raid0_quiesce,
};

static int __init raid0_init(void)
{
        return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
        unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");