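// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid0.c : Multiple Devices driver for Linux.
 * RAID-0 management functions.
 */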
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

/*
 * Layout to assume for a multi-zone array whose metadata does not record
 * one; valid values are RAID0_ORIG_LAYOUT and RAID0_ALT_MULTIZONE_LAYOUT
 * (see the checks in create_strip_zones()).
 */
static int default_layout = 0;
module_param(default_layout, int, 0644);

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

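/* Log the assembled zone/device layout via pr_debug() for diagnostics. */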
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k ? "/" : "",
					bdevname(conf->devlist[j * raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			 (unsigned long long)zone_start>>1,
			 (unsigned long long)conf->strip_zone[j].dev_start>>1,
			 (unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

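/*
 * Build the strip-zone table: equally sized members form zone 0; each
 * distinct larger size opens another zone covering the remainder of the
 * bigger devices. Returns 0 with the config in *private_conf, or -errno.
 */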
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	if (conf->nr_strip_zones == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -ENOTSUPP;
		goto abort;
	}

	/*
	 * now since we have the hard sector size, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
			       GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/*
	 * The first zone must contain all devices, so we can
	 * properly be sure that the whole array is striped at this level
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/*
			 * taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

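/*
 * Find the zone which holds a particular offset.
 * Update *sectorp to be an offset in that zone.
 */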
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

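/*
 * Remap a zone-relative sector to the target member device. The
 * power-of-2 chunk-size case is kept separate from the general case
 * for performance.
 */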
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				  sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);

		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone; quotient is the chunk in real device */
		chunk = *sector_offset;
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}

	/*
	 *  position the bio over the real device
	 *  real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

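/*
 * Usable array size: each member contributes its size rounded down to a
 * whole number of chunks. raid0 does not support generic reshape, so the
 * sectors/raid_disks arguments must be zero.
 */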
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

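/*
 * Assemble the array: build (or, after a takeover, reuse) the zone
 * table, apply the block-queue limits, and publish the array size.
 */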
static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

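/*
 * Handle a DISCARD: split off anything past the first zone boundary,
 * then issue one discard per member device covering its share of the
 * remaining range.
 */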
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		md_submit_discard_bio(mddev, rdev, bio,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start);
	}
	bio_endio(bio);
}

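/*
 * Route a bio to the member device holding its chunk; bios that cross a
 * chunk boundary are split first, and flush/discard requests are handed
 * off to their dedicated paths.
 */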
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	if (bio->bi_pool != &mddev->bio_set)
		md_account_bio(mddev, &bio);

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
				      bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}

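/*
 * Take over a raid4/5 array: it must be degraded by exactly the parity
 * disk, leaving the data disks already laid out as a RAID0 stripe.
 */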
static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

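/*
 * Take over a raid10 array: only the near-2 layout with one half of
 * every mirror pair already failed can be re-striped as RAID0.
 */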
static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

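/*
 * Take over a raid1 array: all but one mirror must already be faulty;
 * the survivor becomes a single-disk RAID0.
 */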
static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

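/* raid0 keeps no internal I/O state, so there is nothing to quiesce. */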
static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2");
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");