/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"

/*
 * The following flags are used by dm-raid to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10		/* rdev flag */

struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity.  The reason for this is to not confuse
	 * ti->len and give more flexibility in configuration.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Flags for rs->print_flags field.
 */
#define DMPF_SYNC              0x1
#define DMPF_NOSYNC            0x2
#define DMPF_REBUILD           0x4
#define DMPF_DAEMON_SLEEP      0x8
#define DMPF_MIN_RECOVERY_RATE 0x10
#define DMPF_MAX_RECOVERY_RATE 0x20
#define DMPF_MAX_WRITE_BEHIND  0x40
#define DMPF_STRIPE_CACHE      0x80
#define DMPF_REGION_SIZE       0x100
#define DMPF_RAID10_COPIES     0x200
#define DMPF_RAID10_FORMAT     0x400

struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint32_t print_flags;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
};

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid1",    "RAID1 (mirroring)",		0, 2, 1,  0 /* NONE */},
	{"raid10",   "RAID10 (striped mirrors)",	0, 2, 10, UINT_MAX /* Varies */},
	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5,  ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ls", "RAID5 (left symmetric)",		1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "RAID5 (right symmetric)",		1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid6_zr", "RAID6 (zero restart)",		2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "RAID6 (N restart)",		2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE}
};

static char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 marks "offset" layouts; "far" and "offset" layouts both
	 * set bit 17 (see raid10_format_to_md_layout below).
	 */
	if ((layout & 0x10000) && (layout & 0x20000))
		return "offset";

	if ((layout & 0xFF) > 1)
		return "near";

	return "far";
}

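/*
 * Decode the copy count from an MD RAID10 layout word: the low byte
 * holds the "near" copy count and the next byte the "far" copy count;
 * whichever exceeds 1 is the effective number of copies.
 */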
static unsigned raid10_md_layout_to_copies(int layout)
{
	if ((layout & 0xFF) > 1)
		return layout & 0xFF;
	return (layout >> 8) & 0xFF;
}

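/*
 * Build an MD RAID10 layout word from the dm-raid "format" string:
 * "near" puts the copy count in the low byte, while "far" and "offset"
 * put it in the second byte and set bit 17 (plus bit 16 for "offset").
 */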
static int raid10_format_to_md_layout(char *format, unsigned copies)
{
	unsigned n = 1, f = 1;

	if (!strcmp("near", format))
		n = copies;
	else
		f = copies;

	if (!strcmp("offset", format))
		return 0x30000 | (f << 8) | n;

	if (!strcmp("far", format))
		return 0x20000 | (f << 8) | n;

	return (f << 8) | n;
}

static struct raid_type *get_raid_type(char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
		if (!strcmp(raid_types[i].name, name))
			return &raid_types[i];

	return NULL;
}

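/*
 * Allocate a raid_set with room for 'raid_devs' raid_dev slots and seed
 * the embedded mddev from the raid_type; chunk size, device sizes and
 * metadata mode are filled in later by parse_raid_params and dev_parms.
 */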
static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

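/*
 * Undo context_alloc and dev_parms: drop DM device references, free
 * superblock pages via md_rdev_clear and release the raid_set.
 */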
static void context_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
static int dev_parms(struct raid_set *rs, char **argv)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int ret = 0;

	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		if (strcmp(argv[0], "-")) {
			ret = dm_get_device(rs->ti, argv[0],
					    dm_table_get_mode(rs->ti->table),
					    &rs->dev[i].meta_dev);
			rs->ti->error = "RAID metadata device lookup failure";
			if (ret)
				return ret;

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return -ENOMEM;
		}

		if (!strcmp(argv[1], "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			rs->ti->error = "No data device supplied with metadata device";
			if (rs->dev[i].meta_dev)
				return -EINVAL;

			continue;
		}

		ret = dm_get_device(rs->ti, argv[1],
				    dm_table_get_mode(rs->ti->table),
				    &rs->dev[i].data_dev);
		if (ret) {
			rs->ti->error = "RAID device lookup failure";
			return ret;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}

/*
 * validate_region_size
 * @rs
 * @region_size:  region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}

/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned i, rebuild_cnt = 0;
	unsigned rebuilds_per_group, copies, d;
	unsigned group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->raid_type->level) {
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.layout);
		if (rebuild_cnt < copies)
			break;

		/*
		 * "near" format: copies of a block sit on consecutive
		 * devices within the same stripe, so walk every
		 * (stripe, copy) slot in order and make sure that no group
		 * of 'copies' adjacent devices has lost all of its members.
		 *
		 * E.g. 5 devices, 2 copies:
		 *    dev1 dev2 dev3 dev4 dev5
		 *     A    A    B    B    C
		 *     C    D    D    E    E
		 */
		if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
			for (i = 0; i < rs->md.raid_disks * copies; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				d = i % rs->md.raid_disks;
				if ((!rs->dev[d].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * "far" and "offset" formats: the devices are divided into
		 * groups of 'copies' members, and losing every device in a
		 * group loses data.  Because the device count need not be a
		 * multiple of 'copies', the last (potentially larger) group
		 * starts at 'last_group_start' and shares the running
		 * counter instead of resetting it on the usual boundary.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}

/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '-write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 */
static int parse_raid_params(struct raid_set *rs, char **argv,
			     unsigned num_raid_params)
{
	char *raid10_format = "near";
	unsigned raid10_copies = 2;
	unsigned i;
	unsigned long value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
	char *key;

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if ((strict_strtoul(argv[0], 10, &value) < 0)) {
		rs->ti->error = "Bad chunk size";
		return -EINVAL;
	} else if (rs->raid_type->level == 1) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
	argv++;
	num_raid_params--;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset' to allow for the possibility that metadata
	 * devices may not be provided ("- <data_dev>" pairs in the table).
	 * When metadata is present, the In_sync/recovery_offset state of
	 * each device is refined by the superblock validation code;
	 * without metadata, what is set here is what the array runs with.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		if (!strcasecmp(argv[i], "nosync")) {
			rs->md.recovery_cp = MaxSector;
			rs->print_flags |= DMPF_NOSYNC;
			continue;
		}
		if (!strcasecmp(argv[i], "sync")) {
			rs->md.recovery_cp = 0;
			rs->print_flags |= DMPF_SYNC;
			continue;
		}

		/* The rest of the optional arguments come in key/value pairs */
		if ((i + 1) >= num_raid_params) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		key = argv[i++];

		/* Parameters that take a string value are checked here */
		if (!strcasecmp(key, "raid10_format")) {
			if (rs->raid_type->level != 10) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			if (strcmp("near", argv[i]) &&
			    strcmp("far", argv[i]) &&
			    strcmp("offset", argv[i])) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return -EINVAL;
			}
			raid10_format = argv[i];
			rs->print_flags |= DMPF_RAID10_FORMAT;
			continue;
		}

		if (strict_strtoul(argv[i], 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		/* Parameters that take a numeric value are checked here */
		if (!strcasecmp(key, "rebuild")) {
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}
			clear_bit(In_sync, &rs->dev[value].rdev.flags);
			rs->dev[value].rdev.recovery_offset = 0;
			rs->print_flags |= DMPF_REBUILD;
		} else if (!strcasecmp(key, "write_mostly")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid write_mostly drive index given";
				return -EINVAL;
			}
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
		} else if (!strcasecmp(key, "max_write_behind")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, "daemon_sleep")) {
			rs->print_flags |= DMPF_DAEMON_SLEEP;
			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, "stripe_cache")) {
			rs->print_flags |= DMPF_STRIPE_CACHE;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if ((rs->raid_type->level != 5) &&
			    (rs->raid_type->level != 6)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}
			if (raid5_set_cache_size(&rs->md, (int)value)) {
				rs->ti->error = "Bad stripe_cache size";
				return -EINVAL;
			}
		} else if (!strcasecmp(key, "min_recovery_rate")) {
			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, "max_recovery_rate")) {
			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, "region_size")) {
			rs->print_flags |= DMPF_REGION_SIZE;
			region_size = value;
		} else if (!strcasecmp(key, "raid10_copies") &&
			   (rs->raid_type->level == 10)) {
			if ((value < 2) || (value > 0xFF)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_RAID10_COPIES;
			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameters";
			return -EINVAL;
		}
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rs->raid_type->level == 10) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		/*
		 * If the format is not "near", we only support
		 * two copies at the moment.
		 */
		if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
			rs->ti->error = "Too many copies for given RAID10 format.";
			return -EINVAL;
		}

		/* (Len * #mirrors) / #devices */
		sectors_per_dev = rs->ti->len * raid10_copies;
		sector_div(sectors_per_dev, rs->md.raid_disks);

		rs->md.layout = raid10_format_to_md_layout(raid10_format,
							   raid10_copies);
		rs->md.new_layout = rs->md.layout;
	} else if ((rs->raid_type->level > 1) &&
		   sector_div(sectors_per_dev,
			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
		rs->ti->error = "Target length not divisible by number of data devices";
		return -EINVAL;
	}
	rs->md.dev_sectors = sectors_per_dev;

	/* Assume there is no metadata until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	return 0;
}

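/*
 * MD queues this work whenever array state changes; forward it to
 * userspace as a DM table event.
 */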
static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);
}

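/*
 * Congestion callback, forwarded to the personality that matches the
 * RAID level in use: raid1, raid10, or raid5 (which also implements
 * levels 4 and 6).
 */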
static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	if (rs->raid_type->level == 1)
		return md_raid1_congested(&rs->md, bits);

	if (rs->raid_type->level == 10)
		return md_raid10_congested(&rs->md, bits);

	return md_raid5_congested(&rs->md, bits);
}

/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 features;	/* Used to indicate possible future changes */

	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
	__le32 array_position;	/* The position of this drive in the array */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Bit field of devices to indicate failures */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * individual devices.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial array
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * RAID characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	__u8 pad[452];		/* Round struct to 512 bytes. */
				/* Always set to 0 when writing. */
} __packed;

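/*
 * Synchronously read the superblock at the start of the metadata device
 * into rdev->sb_page, caching the result via rdev->sb_loaded.
 */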
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		return -EINVAL;
	}

	rdev->sb_loaded = 1;

	return 0;
}

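/*
 * MD's sync_super hook: regenerate the dm-raid superblock in
 * rdev->sb_page from current mddev state, including a refreshed bitmap
 * of failed devices.
 */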
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	int i;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	sb = page_address(rdev->sb_page);
	failed_devices = le64_to_cpu(sb->failed_devices);

	for (i = 0; i < mddev->raid_disks; i++)
		if (!rs->dev[i].data_dev ||
		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
			failed_devices |= (1ULL << i);

	memset(sb, 0, sizeof(*sb));

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->features = cpu_to_le32(0);

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);
	sb->failed_devices = cpu_to_le64(failed_devices);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
}

/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int ret;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_start = 0;
	rdev->sb_size = sizeof(*sb);

	ret = read_disk_sb(rdev, rdev->sb_size);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);

		/* Force writing of superblocks to disk */
		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}

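/*
 * Validate the superblock of the freshest device against the array
 * described by the table, classify devices as new or rebuilding, and
 * reject anything that would amount to a reshape (level, layout, chunk
 * size or device count changes).
 */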
static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
{
	int role;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
	uint64_t events_sb;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0;
	uint32_t rebuilds = 0;
	struct md_rdev *r;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);
	failed_devices = le64_to_cpu(sb->failed_devices);

	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;

	/*
	 * Reshaping is not currently allowed
	 */
	if (le32_to_cpu(sb->level) != mddev->level) {
		DMERR("Reshaping arrays not yet supported. (RAID level change)");
		return -EINVAL;
	}
	if (le32_to_cpu(sb->layout) != mddev->layout) {
		DMERR("Reshaping arrays not yet supported. (RAID layout change)");
		DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
		DMERR(" Old layout: %s w/ %d copies",
		      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
		      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
		DMERR(" New layout: %s w/ %d copies",
		      raid10_md_layout_to_format(mddev->layout),
		      raid10_md_layout_to_copies(mddev->layout));
		return -EINVAL;
	}
	if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
		DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
		return -EINVAL;
	}

	/* We can only change the number of devices in RAID1 right now */
	if ((rs->raid_type->level != 1) &&
	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
		DMERR("Reshaping arrays not yet supported. (device count change)");
		return -EINVAL;
	}

	if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The array is brand new - in which case, all of the
	 *    devices must have their In_sync bit set.  Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old array
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 */
	rdev_for_each(r, mddev) {
		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild: "
			       "Clearing superblock", r->raid_disk);
			rebuilds++;
		} else if (test_bit(FirstUse, &r->flags))
			new_devs++;
	}

	if (!rebuilds) {
		if (new_devs == mddev->raid_disks) {
			DMINFO("Superblocks created for new array");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs) {
			DMERR("New device injected "
			      "into existing array without 'rebuild' "
			      "parameter specified");
			return -EINVAL;
		}
	} else if (new_devs) {
		DMERR("'rebuild' devices cannot be "
		      "injected into an array with other first-time devices");
		return -EINVAL;
	} else if (mddev->recovery_cp != MaxSector) {
		DMERR("'rebuild' specified while array is not in-sync");
		return -EINVAL;
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	rdev_for_each(r, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role != r->raid_disk) {
				if (rs->raid_type->level != 1) {
					rs->ti->error = "Cannot change device "
							"positions in RAID array";
					return -EINVAL;
				}
				DMINFO("RAID1 device #%d now at position #%d",
				       role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (failed_devices & (1ULL << role))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}

static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(mddev, rdev))
		return -EINVAL;

	mddev->bitmap_info.offset = 4096 >> 9;
	rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
	if (!test_bit(FirstUse, &rdev->flags)) {
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset != MaxSector)
			clear_bit(In_sync, &rdev->flags);
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_bit(Faulty, &rdev->flags)) {
		clear_bit(Faulty, &rdev->flags);
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
		rdev->recovery_offset = 0;
	}

	clear_bit(FirstUse, &rdev->flags);

	return 0;
}

/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int ret;
	struct raid_dev *dev;
	struct md_rdev *rdev, *tmp, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev) {
		/*
		 * Skipping super_load due to DMPF_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 *
		 * When reshaping capability is added, we must ensure
		 * that the "sync" directive is disallowed during the
		 * reshape.
		 */
		if (rs->print_flags & DMPF_SYNC)
			continue;

		if (!rdev->meta_bdev)
			continue;

		ret = super_load(rdev, freshest);

		switch (ret) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			dev = container_of(rdev, struct raid_dev, rdev);
			if (dev->meta_dev)
				dm_put_device(ti, dev->meta_dev);

			dev->meta_dev = NULL;
			rdev->meta_bdev = NULL;

			if (rdev->sb_page)
				put_page(rdev->sb_page);

			rdev->sb_page = NULL;

			rdev->sb_loaded = 0;

			/*
			 * We might be able to salvage the data device
			 * even though the meta device has failed.  For
			 * now, we behave as though '- -' had been
			 * given for this device in the table.
			 */
			if (dev->data_dev)
				dm_put_device(ti, dev->data_dev);

			dev->data_dev = NULL;
			rdev->bdev = NULL;

			list_del(&rdev->same_set);
		}
	}

	if (!freshest)
		return 0;

	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	/*
	 * Validation of the freshest device provides the source
	 * of validation for the remaining devices.
	 */
	ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(mddev, freshest))
		return -EINVAL;

	rdev_for_each(rdev, mddev)
		if ((rdev != freshest) && super_validate(mddev, rdev))
			return -EINVAL;

	return 0;
}

/*
 * Construct a RAID4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>		\
 *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 */
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int ret;
	struct raid_type *rt;
	unsigned long num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;

	/* Must have at least <raid_type> <#raid_params> */
	if (argc < 2) {
		ti->error = "Too few arguments";
		return -EINVAL;
	}

	/* raid type */
	rt = get_raid_type(argv[0]);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* number of RAID parameters */
	if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) {
		ti->error = "Cannot understand number of RAID parameters";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* Skip over RAID params for now and find out # of devices */
	if (num_raid_params + 1 > argc) {
		ti->error = "Arguments do not agree with counts given";
		return -EINVAL;
	}

	if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
	    (num_raid_devs >= INT_MAX)) {
		ti->error = "Cannot understand number of raid devices";
		return -EINVAL;
	}

	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
	if (ret)
		goto bad;

	ret = -EINVAL;

	argc -= num_raid_params + 1;
	argv += num_raid_params + 1;

	if (argc != (num_raid_devs * 2)) {
		ti->error = "Supplied RAID devices do not match the count given";
		goto bad;
	}

	ret = dev_parms(rs, argv);
	if (ret)
		goto bad;

	rs->md.sync_super = super_sync;
	ret = analyse_superblocks(ti, rs);
	if (ret)
		goto bad;

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	mutex_lock(&rs->md.reconfig_mutex);
	ret = md_run(&rs->md);
	rs->md.in_sync = 0;
	mutex_unlock(&rs->md.reconfig_mutex);

	if (ret) {
		ti->error = "Failed to run raid array";
		goto bad;
	}

	if (ti->len != rs->md.array_sectors) {
		ti->error = "Array size does not match requested target length";
		ret = -EINVAL;
		goto size_mismatch;
	}
	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);
	return 0;

size_mismatch:
	md_stop(&rs->md);
bad:
	context_free(rs);

	return ret;
}

static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	context_free(rs);
}

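/*
 * All I/O is handed directly to the MD personality for the array.
 */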
static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}

static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned sz = 0;
	int i, array_in_sync = 0;
	sector_t sync;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
			sync = rs->md.curr_resync_completed;
		else
			sync = rs->md.recovery_cp;

		if (sync >= rs->md.resync_max_sectors) {
			array_in_sync = 1;
			sync = rs->md.resync_max_sectors;
		} else {
			/*
			 * The array may be doing an initial sync, or it may
			 * be rebuilding individual components.  If all the
			 * devices are In_sync, then it is the array that is
			 * being initialized.
			 */
			for (i = 0; i < rs->md.raid_disks; i++)
				if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
					array_in_sync = 1;
		}

		/*
		 * Status characters:
		 *  'D' = Dead/Failed device
		 *  'a' = Alive but not in-sync
		 *  'A' = Alive and in-sync
		 */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
				DMEMIT("D");
			else if (!array_in_sync ||
				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT("a");
			else
				DMEMIT("A");
		}

		/*
		 * In-sync ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the array
		 *   - Rebuilding a subset of devices of the array
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 */
		DMEMIT(" %llu/%llu",
		       (unsigned long long) sync,
		       (unsigned long long) rs->md.resync_max_sectors);

		break;
	case STATUSTYPE_TABLE:
		/* The string you would use to construct this array */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2; /* for rebuilds */
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2;
		}

		raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
			raid_param_cnt--;

		DMEMIT("%s %u %u", rs->raid_type->name,
		       raid_param_cnt, rs->md.chunk_sectors);

		if ((rs->print_flags & DMPF_SYNC) &&
		    (rs->md.recovery_cp == MaxSector))
			DMEMIT(" sync");
		if (rs->print_flags & DMPF_NOSYNC)
			DMEMIT(" nosync");

		for (i = 0; i < rs->md.raid_disks; i++)
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT(" rebuild %u", i);

		if (rs->print_flags & DMPF_DAEMON_SLEEP)
			DMEMIT(" daemon_sleep %lu",
			       rs->md.bitmap_info.daemon_sleep);

		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

		for (i = 0; i < rs->md.raid_disks; i++)
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				DMEMIT(" write_mostly %u", i);

		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
			DMEMIT(" max_write_behind %lu",
			       rs->md.bitmap_info.max_write_behind);

		if (rs->print_flags & DMPF_STRIPE_CACHE) {
			struct r5conf *conf = rs->md.private;

			/* convert from kiB to sectors */
			DMEMIT(" stripe_cache %d",
			       conf ? conf->max_nr_stripes * 2 : 0);
		}

		if (rs->print_flags & DMPF_REGION_SIZE)
			DMEMIT(" region_size %lu",
			       rs->md.bitmap_info.chunksize >> 9);

		if (rs->print_flags & DMPF_RAID10_COPIES)
			DMEMIT(" raid10_copies %u",
			       raid10_md_layout_to_copies(rs->md.layout));

		if (rs->print_flags & DMPF_RAID10_FORMAT)
			DMEMIT(" raid10_format %s",
			       raid10_md_layout_to_format(rs->md.layout));

		DMEMIT(" %d", rs->md.raid_disks);
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (rs->dev[i].meta_dev)
				DMEMIT(" %s", rs->dev[i].meta_dev->name);
			else
				DMEMIT(" -");

			if (rs->dev[i].data_dev)
				DMEMIT(" %s", rs->dev[i].data_dev->name);
			else
				DMEMIT(" -");
		}
	}
}

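/*
 * Only data devices are reported; metadata devices are internal to the
 * target and not part of the mapped address space (hence offset 0 and
 * length dev_sectors below).
 */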
static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned i;
	int ret = 0;

	for (i = 0; !ret && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			ret = fn(ti,
				 rs->dev[i].data_dev,
				 0,
				 rs->md.dev_sectors,
				 data);

	return ret;
}

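/*
 * The io_opt hint advertises the full data-stripe width; note that
 * rs->md.private is an r5conf only for the RAID4/5/6 personalities, and
 * for RAID1 chunk_size is 0, so both hints evaluate to 0 there.
 */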
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}

static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_suspend(&rs->md);
}

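/*
 * The first resume after the table load must also load the write-intent
 * bitmap before recovery is unfrozen; later resumes only unfreeze.
 */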
static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	set_bit(MD_CHANGE_DEVS, &rs->md.flags);
	if (!rs->bitmap_loaded) {
		bitmap_load(&rs->md);
		rs->bitmap_loaded = 1;
	}

	clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
	mddev_resume(&rs->md);
}

static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 4, 2},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.resume = raid_resume,
};

static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

MODULE_DESCRIPTION(DM_NAME " raid1/4/5/6/10 target");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");